From 1abbfa9eb7c7dc0877592d6e28a0aea95bdfe55f Mon Sep 17 00:00:00 2001 From: koko2pp Date: Fri, 15 Sep 2023 15:17:19 +0800 Subject: [PATCH] translate Chinese annotations to English annotations Signed-off-by: koko2pp --- WORKSPACE | 2 +- build.sh | 10 +- conf/chunkserver.conf | 144 +- conf/chunkserver.conf.example | 145 +- conf/client.conf | 118 +- conf/cs_client.conf | 116 +- conf/mds.conf | 170 +- conf/py_client.conf | 112 +- conf/snap_client.conf | 116 +- conf/snapshot_clone_server.conf | 80 +- conf/tools.conf | 10 +- curve-ansible/client.ini | 2 +- .../wait_copysets_status_healthy.yml | 2 +- curve-ansible/group_vars/mds.yml | 2 +- .../roles/generate_config/defaults/main.yml | 20 +- .../templates/chunkserver.conf.j2 | 138 +- .../generate_config/templates/client.conf.j2 | 116 +- .../generate_config/templates/mds.conf.j2 | 166 +- .../templates/nebd-client.conf.j2 | 22 +- .../templates/nebd-server.conf.j2 | 10 +- .../templates/snapshot_clone_server.conf.j2 | 80 +- .../generate_config/templates/tools.conf.j2 | 10 +- .../install_package/files/disk_uuid_repair.py | 40 +- .../templates/chunkserver_ctl.sh.j2 | 24 +- .../templates/chunkserver_deploy.sh.j2 | 32 +- .../templates/etcd-daemon.sh.j2 | 44 +- .../templates/mds-daemon.sh.j2 | 52 +- .../install_package/templates/nebd-daemon.j2 | 8 +- .../templates/snapshot-daemon.sh.j2 | 52 +- .../roles/install_package/vars/main.yml | 2 +- .../roles/restart_service/defaults/main.yml | 2 +- .../tasks/include/restart_mds.yml | 2 +- .../tasks/include/restart_snapshotclone.yml | 2 +- .../roles/restart_service/tasks/main.yml | 2 +- .../roles/restart_service/vars/main.yml | 2 +- .../vars/main.yml | 2 +- .../tasks/include/start_chunkserver.yml | 2 +- .../roles/start_service/tasks/main.yml | 2 +- .../roles/stop_service/tasks/main.yml | 2 +- curve-ansible/rolling_update_curve.yml | 14 +- curve-ansible/server.ini | 14 +- curvefs/conf/curvebs_client.conf | 120 +- curvefs/monitor/grafana-report.py | 18 +- .../grafana/provisioning/dashboards/mds.json | 8 +- .../metaserverclient/metaserver_client.cpp | 2 +- .../src/metaserver/copyset/conf_epoch_file.h | 38 +- curvefs/src/metaserver/inflight_throttle.h | 14 +- .../test/mds/schedule/coordinator_test.cpp | 54 +- .../test/mds/schedule/operatorStep_test.cpp | 10 +- .../mds/schedule/recoverScheduler_test.cpp | 12 +- .../mds/schedule/scheduleMetrics_test.cpp | 24 +- .../scheduleService/scheduleService_test.cpp | 4 +- curvefs/test/volume/bitmap_allocator_test.cpp | 2 +- curvefs_python/cbd_client.h | 6 +- curvefs_python/curve_type.h | 52 +- curvefs_python/curvefs_tool.py | 8 +- curvefs_python/libcurvefs.h | 6 +- curvefs_python/test.py | 4 +- curvesnapshot_python/libcurveSnapshot.cpp | 24 +- curvesnapshot_python/libcurveSnapshot.h | 132 +- .../local/chunkserver/conf/chunkserver.conf.0 | 4 +- .../local/chunkserver/conf/chunkserver.conf.1 | 4 +- .../local/chunkserver/conf/chunkserver.conf.2 | 4 +- include/chunkserver/chunkserver_common.h | 14 +- include/client/libcurve.h | 304 +- include/etcdclient/etcdclient.h | 8 +- .../nebd-package/etc/nebd/nebd-client.conf | 22 +- .../nebd-package/etc/nebd/nebd-server.conf | 10 +- mk-deb.sh | 18 +- mk-tar.sh | 18 +- monitor/grafana-report.py | 18 +- monitor/grafana/dashboards/chunkserver.json | 104 +- monitor/grafana/dashboards/client.json | 34 +- monitor/grafana/dashboards/etcd.json | 2 +- monitor/grafana/dashboards/mds.json | 80 +- monitor/grafana/dashboards/report.json | 50 +- .../dashboards/snapshotcloneserver.json | 26 +- .../provisioning/dashboards/chunkserver.json | 104 +- 
.../provisioning/dashboards/client.json | 34 +- .../grafana/provisioning/dashboards/etcd.json | 2 +- .../grafana/provisioning/dashboards/mds.json | 80 +- .../provisioning/dashboards/report.json | 50 +- .../dashboards/snapshotcloneserver.json | 26 +- nebd/etc/nebd/nebd-client.conf | 22 +- nebd/etc/nebd/nebd-server.conf | 10 +- nebd/nebd-package/usr/bin/nebd-daemon | 8 +- nebd/src/common/configuration.cpp | 4 +- nebd/src/common/configuration.h | 40 +- nebd/src/common/crc32.h | 20 +- nebd/src/common/file_lock.h | 12 +- nebd/src/common/name_lock.h | 16 +- nebd/src/common/stringstatus.h | 20 +- nebd/src/common/timeutility.h | 2 +- nebd/src/part1/async_request_closure.cpp | 6 +- nebd/src/part1/async_request_closure.h | 6 +- nebd/src/part1/heartbeat_manager.h | 18 +- nebd/src/part1/libnebd.cpp | 2 +- nebd/src/part1/libnebd.h | 132 +- nebd/src/part1/libnebd_file.h | 82 +- nebd/src/part1/nebd_client.cpp | 10 +- nebd/src/part1/nebd_client.h | 92 +- nebd/src/part1/nebd_common.h | 34 +- nebd/src/part1/nebd_metacache.h | 22 +- nebd/src/part2/define.h | 36 +- nebd/src/part2/file_entity.cpp | 14 +- nebd/src/part2/file_entity.h | 112 +- nebd/src/part2/file_manager.cpp | 6 +- nebd/src/part2/file_manager.h | 118 +- nebd/src/part2/heartbeat_manager.cpp | 2 +- nebd/src/part2/heartbeat_manager.h | 48 +- nebd/src/part2/main.cpp | 6 +- nebd/src/part2/metafile_manager.cpp | 20 +- nebd/src/part2/metafile_manager.h | 24 +- nebd/src/part2/nebd_server.cpp | 4 +- nebd/src/part2/nebd_server.h | 40 +- nebd/src/part2/request_executor.h | 8 +- nebd/src/part2/request_executor_curve.h | 36 +- nebd/src/part2/util.h | 4 +- nebd/test/common/configuration_test.cpp | 10 +- nebd/test/common/test_name_lock.cpp | 20 +- .../test/part1/heartbeat_manager_unittest.cpp | 12 +- nebd/test/part1/nebd_client_unittest.cpp | 8 +- nebd/test/part2/file_manager_unittest.cpp | 54 +- .../test/part2/heartbeat_manager_unittest.cpp | 10 +- nebd/test/part2/heartbeat_service_test.cpp | 8 +- nebd/test/part2/metafile_manager_test.cpp | 40 +- nebd/test/part2/test_nebd_server.cpp | 26 +- .../part2/test_request_executor_curve.cpp | 60 +- proto/chunk.proto | 70 +- proto/cli.proto | 6 +- proto/cli2.proto | 16 +- proto/common.proto | 12 +- proto/copyset.proto | 42 +- proto/heartbeat.proto | 72 +- proto/nameserver2.proto | 114 +- proto/schedule.proto | 2 +- proto/topology.proto | 2 +- robot/Resources/keywords/deploy.py | 6 +- robot/Resources/keywords/fault_inject.py | 8 +- robot/Resources/keywords/snapshot_operate.py | 10 +- robot/curve_choas.txt | 10 +- robot/curve_robot.txt | 38 +- src/chunkserver/chunk_closure.cpp | 18 +- src/chunkserver/chunk_closure.h | 12 +- src/chunkserver/chunk_service.cpp | 54 +- src/chunkserver/chunk_service.h | 4 +- src/chunkserver/chunk_service_closure.cpp | 24 +- src/chunkserver/chunk_service_closure.h | 26 +- src/chunkserver/chunkserver.cpp | 80 +- src/chunkserver/chunkserver.h | 20 +- src/chunkserver/chunkserver_helper.cpp | 2 +- src/chunkserver/chunkserver_main.cpp | 2 +- src/chunkserver/chunkserver_metrics.cpp | 18 +- src/chunkserver/chunkserver_metrics.h | 248 +- src/chunkserver/cli.h | 12 +- src/chunkserver/cli2.cpp | 16 +- src/chunkserver/cli2.h | 18 +- src/chunkserver/clone_copyer.h | 44 +- src/chunkserver/clone_core.cpp | 84 +- src/chunkserver/clone_core.h | 82 +- src/chunkserver/clone_manager.cpp | 4 +- src/chunkserver/clone_manager.h | 40 +- src/chunkserver/clone_task.h | 8 +- src/chunkserver/conf_epoch_file.cpp | 18 +- src/chunkserver/conf_epoch_file.h | 38 +- src/chunkserver/config_info.h | 62 +- 
src/chunkserver/copyset_node.cpp | 134 +- src/chunkserver/copyset_node.h | 209 +- src/chunkserver/copyset_node_manager.cpp | 54 +- src/chunkserver/copyset_node_manager.h | 120 +- src/chunkserver/copyset_service.cpp | 14 +- src/chunkserver/copyset_service.h | 8 +- src/chunkserver/heartbeat.cpp | 22 +- src/chunkserver/heartbeat.h | 60 +- src/chunkserver/heartbeat_helper.cpp | 16 +- src/chunkserver/heartbeat_helper.h | 42 +- src/chunkserver/inflight_throttle.h | 14 +- src/chunkserver/op_request.cpp | 88 +- src/chunkserver/op_request.h | 88 +- src/chunkserver/passive_getfn.h | 30 +- .../raftsnapshot/curve_file_adaptor.h | 2 +- .../raftsnapshot/curve_file_service.cpp | 18 +- .../raftsnapshot/curve_filesystem_adaptor.cpp | 26 +- .../raftsnapshot/curve_filesystem_adaptor.h | 108 +- .../curve_snapshot_attachment.cpp | 8 +- .../raftsnapshot/curve_snapshot_attachment.h | 32 +- .../raftsnapshot/curve_snapshot_copier.cpp | 10 +- .../raftsnapshot/curve_snapshot_copier.h | 2 +- .../raftsnapshot/curve_snapshot_file_reader.h | 5 +- src/chunkserver/raftsnapshot/define.h | 4 +- src/chunkserver/register.cpp | 6 +- src/chunkserver/register.h | 4 +- src/chunkserver/trash.cpp | 36 +- src/chunkserver/trash.h | 60 +- src/client/chunk_closure.cpp | 60 +- src/client/chunk_closure.h | 64 +- src/client/client_common.h | 28 +- src/client/client_metric.h | 146 +- src/client/config_info.h | 174 +- src/client/copyset_client.cpp | 52 +- src/client/copyset_client.h | 137 +- src/client/file_instance.cpp | 16 +- src/client/file_instance.h | 70 +- src/client/inflight_controller.h | 28 +- src/client/io_condition_varaiable.h | 18 +- src/client/io_tracker.cpp | 8 +- src/client/io_tracker.h | 176 +- src/client/iomanager.h | 12 +- src/client/iomanager4chunk.h | 82 +- src/client/iomanager4file.cpp | 8 +- src/client/iomanager4file.h | 122 +- src/client/lease_executor.cpp | 2 +- src/client/lease_executor.h | 94 +- src/client/libcurve_file.cpp | 12 +- src/client/libcurve_file.h | 186 +- src/client/libcurve_snapshot.h | 246 +- src/client/mds_client.cpp | 64 +- src/client/mds_client.h | 363 ++- src/client/mds_client_base.h | 300 +- src/client/metacache.cpp | 24 +- src/client/metacache.h | 178 +- src/client/metacache_struct.h | 44 +- src/client/request_closure.h | 36 +- src/client/request_context.h | 18 +- src/client/request_scheduler.cpp | 24 +- src/client/request_scheduler.h | 84 +- src/client/request_sender.h | 118 +- src/client/request_sender_manager.cpp | 2 +- src/client/request_sender_manager.h | 20 +- src/client/service_helper.cpp | 50 +- src/client/service_helper.h | 44 +- src/client/splitor.h | 70 +- src/client/unstable_helper.cpp | 2 +- src/client/unstable_helper.h | 26 +- src/common/authenticator.h | 16 +- src/common/bitmap.cpp | 30 +- src/common/bitmap.h | 130 +- src/common/channel_pool.h | 10 +- .../concurrent/bounded_blocking_queue.h | 2 +- src/common/concurrent/concurrent.h | 6 +- src/common/concurrent/count_down_event.h | 26 +- src/common/concurrent/task_thread_pool.h | 32 +- src/common/configuration.cpp | 8 +- src/common/configuration.h | 62 +- src/common/crc32.h | 20 +- src/common/curve_define.h | 4 +- src/common/define.h | 50 +- src/common/fs_util.h | 2 +- src/common/interruptible_sleeper.h | 14 +- src/common/location_operator.cpp | 4 +- src/common/location_operator.h | 40 +- src/common/net_common.h | 4 +- src/common/s3_adapter.cpp | 2 +- src/common/s3_adapter.h | 114 +- .../snapshotclone/snapshotclone_define.cpp | 2 +- .../snapshotclone/snapshotclone_define.h | 50 +- src/common/stringstatus.h | 20 +- 
src/common/timeutility.h | 4 +- src/common/uuid.h | 32 +- src/common/wait_interval.h | 12 +- src/fs/ext4_filesystem_impl.cpp | 20 +- src/fs/local_filesystem.h | 170 +- src/kvstorageclient/etcd_client.h | 2 +- src/leader_election/leader_election.cpp | 2 +- src/leader_election/leader_election.h | 28 +- src/mds/nameserver2/clean_core.cpp | 6 +- src/mds/nameserver2/clean_core.h | 18 +- src/mds/nameserver2/clean_manager.h | 6 +- src/mds/nameserver2/clean_task.h | 2 +- src/mds/nameserver2/clean_task_manager.cpp | 2 +- src/mds/nameserver2/clean_task_manager.h | 26 +- src/snapshotcloneserver/clone/clone_core.cpp | 116 +- src/snapshotcloneserver/clone/clone_core.h | 324 +- .../clone/clone_service_manager.cpp | 50 +- .../clone/clone_service_manager.h | 174 +- src/snapshotcloneserver/clone/clone_task.h | 26 +- .../clone/clone_task_manager.cpp | 20 +- .../clone/clone_task_manager.h | 54 +- src/snapshotcloneserver/common/config.h | 40 +- .../common/curvefs_client.h | 240 +- .../common/snapshotclone_info.h | 52 +- .../common/snapshotclone_meta_store.h | 86 +- .../common/snapshotclone_meta_store_etcd.h | 12 +- .../common/snapshotclone_metric.h | 28 +- src/snapshotcloneserver/common/task.h | 12 +- src/snapshotcloneserver/common/task_info.h | 48 +- src/snapshotcloneserver/common/thread_pool.h | 22 +- src/snapshotcloneserver/main.cpp | 4 +- .../snapshot/snapshot_core.cpp | 64 +- .../snapshot/snapshot_core.h | 210 +- .../snapshot/snapshot_data_store.cpp | 4 +- .../snapshot/snapshot_data_store.h | 132 +- .../snapshot/snapshot_data_store_s3.h | 2 +- .../snapshot/snapshot_service_manager.cpp | 16 +- .../snapshot/snapshot_service_manager.h | 118 +- .../snapshot/snapshot_task.cpp | 22 +- .../snapshot/snapshot_task.h | 96 +- .../snapshot/snapshot_task_manager.cpp | 8 +- .../snapshot/snapshot_task_manager.h | 58 +- .../snapshotclone_server.cpp | 2 +- .../snapshotclone_server.h | 20 +- .../snapshotclone_service.cpp | 16 +- .../snapshotclone_service.h | 16 +- src/tools/chunkserver_client.cpp | 6 +- src/tools/chunkserver_client.h | 32 +- src/tools/chunkserver_tool_factory.h | 10 +- src/tools/common.cpp | 6 +- src/tools/common.h | 6 +- src/tools/consistency_check.cpp | 14 +- src/tools/consistency_check.h | 90 +- src/tools/copyset_check.cpp | 6 +- src/tools/copyset_check.h | 72 +- src/tools/copyset_check_core.cpp | 99 +- src/tools/copyset_check_core.h | 196 +- src/tools/curve_cli.cpp | 6 +- src/tools/curve_cli.h | 62 +- src/tools/curve_format_main.cpp | 24 +- src/tools/curve_meta_tool.cpp | 12 +- src/tools/curve_meta_tool.h | 26 +- src/tools/curve_tool_define.h | 20 +- src/tools/curve_tool_factory.h | 18 +- src/tools/curve_tool_main.cpp | 6 +- src/tools/etcd_client.h | 22 +- src/tools/mds_client.cpp | 22 +- src/tools/mds_client.h | 340 +-- src/tools/metric_client.cpp | 2 +- src/tools/metric_client.h | 38 +- src/tools/metric_name.h | 2 +- src/tools/namespace_tool.cpp | 12 +- src/tools/namespace_tool.h | 44 +- src/tools/namespace_tool_core.cpp | 10 +- src/tools/namespace_tool_core.h | 112 +- src/tools/raft_log_tool.cpp | 2 +- src/tools/raft_log_tool.h | 60 +- src/tools/schedule_tool.cpp | 2 +- src/tools/schedule_tool.h | 22 +- src/tools/snapshot_check.h | 24 +- src/tools/snapshot_clone_client.cpp | 8 +- src/tools/snapshot_clone_client.h | 48 +- src/tools/status_tool.cpp | 14 +- src/tools/status_tool.h | 78 +- src/tools/version_tool.cpp | 2 +- src/tools/version_tool.h | 62 +- test/chunkserver/braft_cli_service2_test.cpp | 58 +- test/chunkserver/braft_cli_service_test.cpp | 22 +- test/chunkserver/chunk_service_test.cpp | 22 
+- test/chunkserver/chunk_service_test2.cpp | 46 +- test/chunkserver/chunkserver_helper_test.cpp | 6 +- test/chunkserver/chunkserver_service_test.cpp | 10 +- .../chunkserver/chunkserver_snapshot_test.cpp | 390 +-- test/chunkserver/chunkserver_test_util.cpp | 16 +- test/chunkserver/chunkserver_test_util.h | 100 +- test/chunkserver/cli2_test.cpp | 60 +- test/chunkserver/cli_test.cpp | 46 +- test/chunkserver/client.cpp | 2 +- test/chunkserver/clone/clone_copyer_test.cpp | 34 +- test/chunkserver/clone/clone_core_test.cpp | 176 +- test/chunkserver/clone/clone_manager_test.cpp | 40 +- test/chunkserver/clone/op_request_test.cpp | 430 +-- test/chunkserver/copyset_epoch_test.cpp | 14 +- .../chunkserver/copyset_node_manager_test.cpp | 22 +- test/chunkserver/copyset_node_test.cpp | 44 +- test/chunkserver/copyset_service_test.cpp | 28 +- .../datastore/datastore_mock_unittest.cpp | 612 ++-- .../datastore/file_helper_unittest.cpp | 10 +- .../datastore/filepool_mock_unittest.cpp | 144 +- .../datastore/filepool_unittest.cpp | 14 +- test/chunkserver/fake_datastore.h | 2 +- test/chunkserver/heartbeat_helper_test.cpp | 34 +- test/chunkserver/heartbeat_test.cpp | 54 +- test/chunkserver/heartbeat_test_common.cpp | 10 +- test/chunkserver/heartbeat_test_common.h | 64 +- test/chunkserver/heartbeat_test_main.cpp | 8 +- test/chunkserver/inflight_throttle_test.cpp | 6 +- test/chunkserver/metrics_test.cpp | 74 +- ...curve_filesystem_adaptor_mock_unittest.cpp | 18 +- .../curve_filesystem_adaptor_unittest.cpp | 24 +- .../curve_snapshot_attachment_test.cpp | 6 +- ...raftsnapshot_chunkfilepool_integration.cpp | 114 +- test/chunkserver/server.cpp | 2 +- test/chunkserver/trash_test.cpp | 2 +- test/client/client_common_unittest.cpp | 12 +- .../client_mdsclient_metacache_unittest.cpp | 94 +- test/client/client_metric_test.cpp | 18 +- test/client/client_session_unittest.cpp | 2 +- test/client/client_unstable_helper_test.cpp | 14 +- test/client/client_userinfo_unittest.cpp | 2 +- test/client/copyset_client_test.cpp | 226 +- test/client/fake/client_workflow_test.cpp | 4 +- .../client/fake/client_workflow_test4snap.cpp | 2 +- test/client/fake/fakeChunkserver.h | 2 +- test/client/fake/fakeMDS.h | 4 +- test/client/inflight_rpc_control_test.cpp | 25 +- test/client/iotracker_splitor_unittest.cpp | 34 +- test/client/lease_executor_test.cpp | 2 +- test/client/libcbd_libcurve_test.cpp | 2 +- test/client/libcurve_interface_unittest.cpp | 114 +- test/client/mds_failover_test.cpp | 62 +- test/client/mock/mock_chunkservice.h | 6 +- test/client/request_scheduler_test.cpp | 20 +- test/client/request_sender_test.cpp | 2 +- test/common/bitmap_test.cpp | 24 +- test/common/channel_pool_test.cpp | 8 +- test/common/configuration_test.cpp | 24 +- test/common/count_down_event_test.cpp | 18 +- test/common/lru_cache_test.cpp | 40 +- test/common/task_thread_pool_test.cpp | 32 +- test/common/test_name_lock.cpp | 20 +- test/failpoint/failpoint_test.cpp | 20 +- test/fs/ext4_filesystem_test.cpp | 2 +- .../chunkserver/chunkserver_basic_test.cpp | 98 +- .../chunkserver/chunkserver_clone_recover.cpp | 194 +- .../chunkserver_concurrent_test.cpp | 214 +- .../datastore/datastore_basic_test.cpp | 36 +- .../datastore/datastore_clone_case_test.cpp | 98 +- .../datastore/datastore_concurrency_test.cpp | 6 +- .../datastore/datastore_exception_test.cpp | 310 +- .../datastore/datastore_integration_base.h | 2 +- .../datastore/datastore_integration_test.cpp | 108 +- .../datastore/datastore_restart_test.cpp | 142 +- .../datastore_snapshot_case_test.cpp | 104 +- 
.../datastore/datastore_stress_test.cpp | 18 +- .../client/chunkserver_exception_test.cpp | 208 +- .../client/common/file_operation.cpp | 8 +- .../client/common/file_operation.h | 2 +- .../integration/client/mds_exception_test.cpp | 404 +-- .../unstable_chunkserver_exception_test.cpp | 40 +- test/integration/cluster_common/cluster.cpp | 32 +- test/integration/cluster_common/cluster.h | 255 +- .../cluster_common/cluster_basic_test.cpp | 60 +- .../integration/cluster_common/mds.basic.conf | 158 +- test/integration/common/chunkservice_op.cpp | 28 +- test/integration/common/chunkservice_op.h | 148 +- test/integration/common/config_generator.h | 2 +- test/integration/common/peer_cluster.cpp | 116 +- test/integration/common/peer_cluster.h | 202 +- test/integration/heartbeat/common.cpp | 4 +- test/integration/heartbeat/common.h | 86 +- .../heartbeat/heartbeat_basic_test.cpp | 2650 ++++++++--------- .../heartbeat/heartbeat_exception_test.cpp | 78 +- .../raft/raft_config_change_test.cpp | 550 ++-- .../raft/raft_log_replication_test.cpp | 276 +- test/integration/raft/raft_snapshot_test.cpp | 182 +- test/integration/raft/raft_vote_test.cpp | 310 +- .../fake_curvefs_client.cpp | 6 +- .../snapshotcloneserver/fake_curvefs_client.h | 16 +- .../snapshotcloneserver_common_test.cpp | 342 +-- .../snapshotcloneserver_concurrent_test.cpp | 34 +- .../snapshotcloneserver_exception_test.cpp | 46 +- .../snapshotcloneserver_module.cpp | 2 +- .../snapshotcloneserver_recover_test.cpp | 132 +- .../snapshotcloneserver_test.cpp | 10 +- test/kvstorageclient/etcdclient_test.cpp | 70 +- .../chunkserver_healthy_checker_test.cpp | 24 +- test/mds/heartbeat/heartbeat_manager_test.cpp | 2 +- .../alloc_statistic_helper_test.cpp | 14 +- .../allocstatistic/alloc_statistic_test.cpp | 22 +- test/mds/nameserver2/clean_core_test.cpp | 4 +- test/mds/nameserver2/curvefs_test.cpp | 92 +- test/mds/nameserver2/file_lock_test.cpp | 4 +- test/mds/nameserver2/file_record_test.cpp | 24 +- .../nameserver2/namespace_service_test.cpp | 74 +- test/mds/schedule/coordinator_test.cpp | 52 +- test/mds/schedule/leaderScheduler_test.cpp | 4 +- test/mds/schedule/operatorStep_test.cpp | 10 +- .../mds/schedule/rapidLeaderSheduler_test.cpp | 16 +- test/mds/schedule/recoverScheduler_test.cpp | 12 +- test/mds/schedule/scheduleMetrics_test.cpp | 24 +- .../scheduleService/scheduleService_test.cpp | 8 +- .../schedule/schedulerPOC/scheduler_poc.cpp | 272 +- test/mds/schedule/scheduler_helper_test.cpp | 64 +- test/mds/server/mds_test.cpp | 30 +- test/mds/topology/test_topology.cpp | 12 +- .../test_topology_chunk_allocator.cpp | 10 +- test/mds/topology/test_topology_metric.cpp | 2 +- test/resources.list | 48 +- test/snapshotcloneserver/test_clone_core.cpp | 2 +- .../test_curvefs_client.cpp | 4 +- .../test_snapshot_core.cpp | 26 +- .../test_snapshot_service_manager.cpp | 8 +- test/tools/chunkserver_client_test.cpp | 22 +- test/tools/config/data_check.conf | 102 +- test/tools/copyset_check_core_test.cpp | 108 +- test/tools/copyset_check_test.cpp | 44 +- test/tools/curve_cli_test.cpp | 52 +- test/tools/curve_meta_tool_test.cpp | 18 +- test/tools/data_consistency_check_test.cpp | 28 +- test/tools/etcd_client_test.cpp | 18 +- test/tools/mds_client_test.cpp | 138 +- test/tools/metric_client_test.cpp | 22 +- test/tools/namespace_tool_core_test.cpp | 32 +- test/tools/namespace_tool_test.cpp | 48 +- test/tools/raft_log_tool_test.cpp | 8 +- test/tools/segment_parser_test.cpp | 14 +- test/tools/snapshot_clone_client_test.cpp | 12 +- test/tools/status_tool_test.cpp | 114 +- 
test/tools/version_tool_test.cpp | 36 +- test/util/config_generator.h | 16 +- thirdparties/brpc/brpc.patch | 6 +- thirdparties/brpc/fix-gcc11.patch | 16 +- thirdparties/etcdclient/etcdclient.go | 20 +- thirdparties/glog/glog.patch | 102 +- tools/curvefsTool.cpp | 4 +- tools/snaptool/queryclone.py | 2 +- 504 files changed, 14216 insertions(+), 14222 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index 923fa56139..d5819578c3 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -91,7 +91,7 @@ bind( ) #Import the glog files. -# brpc内BUILD文件在依赖glog时, 直接指定的依赖是"@com_github_google_glog//:glog" +# When the BUILD file in brpc relies on glog, the direct specified dependency is "@com_github_google_glog//:glog" git_repository( name = "com_github_google_glog", remote = "https://github.com/google/glog", diff --git a/build.sh b/build.sh index 9d714c28d6..f9e880d131 100644 --- a/build.sh +++ b/build.sh @@ -17,7 +17,7 @@ # dir=`pwd` -#step1 清除生成的目录和文件 +# step1 Clear generated directories and files bazel clean rm -rf curvefs_python/BUILD rm -rf curvefs_python/tmplib/ @@ -29,8 +29,8 @@ then exit fi -#step2 获取tag版本和git提交版本信息 -#获取tag版本 +# step2 Obtaining Tag Version and Git Submission Version Information +# Get Tag Version tag_version=`git status | grep -w "HEAD detached at" | awk '{print $NF}' | awk -F"v" '{print $2}'` if [ -z ${tag_version} ] then @@ -38,7 +38,7 @@ then tag_version=9.9.9 fi -#获取git提交版本信息 +# Obtain git submission version information commit_id=`git show --abbrev-commit HEAD|head -n 1|awk '{print $2}'` if [ "$1" = "debug" ] then @@ -50,7 +50,7 @@ fi curve_version=${tag_version}+${commit_id}${debug} -#step3 执行编译 +# step3 Execute Compilation # check bazel verion, bazel vesion must = 4.2.2 bazel_version=`bazel version | grep "Build label" | awk '{print $3}'` if [ -z ${bazel_version} ] diff --git a/conf/chunkserver.conf b/conf/chunkserver.conf index 0cfc27b544..edb0380408 100644 --- a/conf/chunkserver.conf +++ b/conf/chunkserver.conf @@ -1,17 +1,17 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_addr} __CURVEADM_TEMPLATE__ global.port=8200 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__ global.subnet=127.0.0.0/24 global.enable_external_server=true global.external_ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_external_addr} __CURVEADM_TEMPLATE__ global.external_subnet=127.0.0.0/24 -# chunk大小,一般16MB +# Chunk size, usually 16MB # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.chunk_size=16777216 -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, usually 4KB # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.meta_page_size=4096 # chunk's block size, IO requests must align with it, supported value is |512| and |4096| @@ -21,40 +21,40 @@ global.meta_page_size=4096 # it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true global.block_size=4096 -# clone chunk允许的最长location长度 +# The maximum allowed location length for clone chunks global.location_limit=3000 # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Supports MDS multiple addresses, separated by commas 127.0.0.1:6666, 127.0.0.1:7777 mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__ -# 向mds注册的最大重试次数 +# Maximum number of retries registered with mds mds.register_retries=100 -# 向mds注册的rpc超时时间,一般1000ms +# RPC 
timeout for registering with mds, typically 1000ms mds.register_timeout=1000 -# 向mds发送心跳的间隔,一般10s +# Interval for sending heartbeats to MDS, usually 10s mds.heartbeat_interval=10 -# 向mds发送心跳的rpc超时间,一般1000ms +# RPC timeout for sending heartbeats to MDS, usually 1000ms mds.heartbeat_timeout=5000 # # Chunkserver settings # -# chunkserver主目录 +# Chunkserver home directory chunkserver.stor_uri=local://./0/ # __CURVEADM_TEMPLATE__ local://${prefix}/data __CURVEADM_TEMPLATE__ -# chunkserver元数据文件 +# Chunkserver metadata file chunkserver.meta_uri=local://./0/chunkserver.dat # __CURVEADM_TEMPLATE__ local://${prefix}/data/chunkserver.dat __CURVEADM_TEMPLATE__ -# disk类型 +# Disk type chunkserver.disk_type=nvme -# raft内部install snapshot带宽上限,一般20MB +# Raft internal install snapshot bandwidth limit, usually 20MB chunkserver.snapshot_throttle_throughput_bytes=20971520 -# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, -# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 -# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 -# 不是20MB的带宽 +# Check cycles allow finer-grained bandwidth control. Taking snapshotThroughputBytes=100MB +# and check cycles=10 as an example, the bandwidth is limited to 10MB per 1/10 second and does not accumulate: +# the first 1/10 second gets 10MB and that allowance then expires, so the second 1/10 second can still use only 10MB, +# not 20MB chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit on the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -70,43 +70,43 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0 # # lease read switch, default is true(open lease read) # if false, all requests will propose to raft(log read) -# 启用lease read,一般开启,否则将退化为log read形式 +# Enable lease read; usually enabled, otherwise reads degrade to log reads copyset.enable_lease_read=true -# 是否检查任期,一般检查 +# Whether to check the raft term, usually enabled copyset.check_term=true -# 是否关闭raft配置变更的服务,一般不关闭 +# Whether to disable the raft configuration change service, generally not disabled copyset.disable_cli=false copyset.log_applied_task=false -# raft选举超时时间,一般是5000ms +# Raft election timeout, usually 5000ms copyset.election_timeout_ms=1000 -# raft打快照间隔,一般是1800s,也就是30分钟 +# Raft snapshot interval, usually 1800s, i.e. 30 minutes copyset.snapshot_interval_s=1800 -# add一个节点,add的节点首先以类似learner的角色拷贝数据 -# 在跟leader差距catchup_margin个entry的时候,leader -# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定 -# 会commit&apply,catchup_margin较小可以大概率确保learner -# 后续很快可以加入复制组 +# When adding a node, the new node first copies data in a learner-like role. +# Once it is within catchup_margin entries of the leader, the leader +# attempts to commit the configuration change entry (in general, the committed entry +# will certainly be committed and applied). A small catchup_margin largely ensures +# that the learner can join the replication group quickly. 
copyset.catchup_margin=1000 -# copyset chunk数据目录 +# Copyset chunk data directory copyset.chunk_data_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# raft wal log目录 +# Raft wal log directory copyset.raft_log_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# raft元数据目录 +# Raft metadata directory copyset.raft_meta_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# raft snapshot目录 +# Raft snapshot directory copyset.raft_snapshot_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__ -# copyset回收目录 +# Copyset Recycle Directory copyset.recycler_uri=local://./0/recycler # __CURVEADM_TEMPLATE__ local://${prefix}/data/recycler __CURVEADM_TEMPLATE__ -# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制 +# When chunkserver is started, the threshold for copyset concurrent loading is set to 0, indicating no restrictions are imposed copyset.load_concurrency=10 # chunkserver use how many threads to use copyset complete sync. copyset.sync_concurrency=20 -# 检查copyset是否加载完成出现异常时的最大重试次数 +# Check if the copyset has completed loading and the maximum number of retries when an exception occurs copyset.check_retrytimes=3 -# 当前peer的applied_index与leader上的committed_index差距小于该值 -# 则判定copyset已经加载完成 +# The current peer's applied_index and leader‘s committed_index difference is less than this value +# Then it is determined that the copyset has been loaded successfully copyset.finishload_margin=2000 -# 循环判定copyset是否加载完成的内部睡眠时间 +# Internal sleep time for loop determination of whether copyset has been loaded and completed copyset.check_loadmargin_interval_ms=1000 # scan copyset interval copyset.scan_interval_sec=5 @@ -132,26 +132,26 @@ copyset.check_syncing_interval_ms=500 # # Clone settings # -# 禁止使用curveclient +# Prohibit the use of curveclient clone.disable_curve_client=false -# 禁止使用s3adapter +# Prohibit the use of s3adapter clone.disable_s3_adapter=false -# 克隆的分片大小,一般1MB +# The shard size of the clone, usually 1MB clone.slice_size=1048576 -# 读clone chunk时是否需要paste到本地 -# 该配置对recover chunk请求类型无效 +# Whether to paste the local location when reading the clone chunk +# This configuration is not valid for the recover chunk request type clone.enable_paste=false -# 克隆的线程数量 +# Number of cloned threads clone.thread_num=10 -# 克隆的队列深度 +# Queue depth for cloning clone.queue_depth=6000 -# curve用户名 +# Curve username curve.root_username=root -# curve密码 +# Curve password curve.root_password=root_password -# client配置文件 +# Client configuration file curve.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/cs_client.conf __CURVEADM_TEMPLATE__ -# s3配置文件 +# S3 configuration file s3.config_path=conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __CURVEADM_TEMPLATE__ # Curve File time to live curve.curve_file_timeout_s=30 @@ -159,7 +159,7 @@ curve.curve_file_timeout_s=30 # # Local FileSystem settings # -# 是否开启使用renameat2,ext4内核3.15以后开始支持 +# Whether to enable the use of renameat2, ext4 kernel support starting from 3.15 onwards fs.enable_renameat2=true # @@ -179,27 +179,27 @@ storeng.sync_write=false # # Concurrent apply module -# 并发模块写线程的并发度,一般是10 +# The concurrency of concurrent module writing threads is generally 10 wconcurrentapply.size=10 -# 并发模块写线程的队列深度 +# Queue depth of concurrent module write threads wconcurrentapply.queuedepth=1 -# 并发模块读线程的并发度,一般是5 +# The concurrency of concurrent module read threads is 
generally 5 rconcurrentapply.size=5 -# 并发模块读线程的队列深度 +# Queue depth of concurrent module read threads rconcurrentapply.queuedepth=1 # # Chunkfile pool # -# 是否开启从chunkfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from chunkfilepool, usually true chunkfilepool.enable_get_chunk_from_pool=true -# chunkfilepool目录 +# chunkfilepool directory chunkfilepool.chunk_file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__ -# chunkfilepool meta文件路径 +# chunkfilepool meta file path chunkfilepool.meta_path=./chunkfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/chunkfilepool.meta __CURVEADM_TEMPLATE__ -# chunkfilepool meta文件大小 +# chunkfilepool meta file size chunkfilepool.cpmeta_file_size=4096 -# chunkfilepool get chunk最大重试次数 +# chunkfilepool get chunk maximum retry count chunkfilepool.retry_times=5 # Enable clean chunk chunkfilepool.clean.enable=true @@ -211,36 +211,36 @@ chunkfilepool.clean.throttle_iops=500 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效 +# Does walpool share chunkfilepool? If true, the configuration is invalid starting from the third entry walfilepool.use_chunk_file_pool=true -# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间 +# Enable when WALpool and ChunkFilePool are shared, and reserve space for WALpool during capacity allocation walfilepool.use_chunk_file_pool_reserve=15 -# 是否开启从walfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from walfilepool, usually true walfilepool.enable_get_segment_from_pool=true -# walpool目录 +# Walpool directory walfilepool.file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__ -# walpool meta文件路径 +# Walpool Meta File Path walfilepool.meta_path=./walfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__ -# walpool meta文件大小 +# Walpool Meta File Size walfilepool.segment_size=8388608 -# WAL metapage大小 +# WAL metapage size walfilepool.metapage_size=4096 -# WAL filepool 元数据文件大小 +# WAL filepool metadata file size walfilepool.meta_file_size=4096 -# WAL filepool get chunk最大重试次数 +# WAL filepool get chunk maximum retry count walfilepool.retry_times=5 # # trash settings # -# chunkserver回收数据彻底删除的过期时间 +# The expiration time for chunkserver to completely delete data for recycling trash.expire_afterSec=300 -# chunkserver检查回收数据过期时间的周期 +# Chunkserver checks the cycle of recycling data expiration time trash.scan_periodSec=120 # common option # -# chunkserver 日志存放文件夹 +# Chunkserver log storage folder chunkserver.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# In the case of unit testing # chunkserver.common.logDir=./runlog/ diff --git a/conf/chunkserver.conf.example b/conf/chunkserver.conf.example index eb664c2fd6..c478b3dc7f 100644 --- a/conf/chunkserver.conf.example +++ b/conf/chunkserver.conf.example @@ -1,18 +1,18 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log levels: INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip=127.0.0.1 global.port=8200 global.subnet=127.0.0.0/24 global.enable_external_server=false global.external_ip=127.0.0.1 global.external_subnet=127.0.0.0/24 -# chunk大小,一般16MB +# Chunk size, typically 16MB global.chunk_size=16777216 -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, typically 4KB global.meta_page_size=4096 -# clone chunk允许的最长location长度 +# Maximum length allowed for the location of a clone chunk # chunk's block size, IO requests must align with it, supported value is |512| and |4096| # it should consist with `block_size` in 
chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations # for clone chunk and snapshot chunk, it's also the minimum granularity that each bit represents @@ -23,34 +23,35 @@ global.location_limit=3000 # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Support for multiple addresses for MDS, separated by commas: 127.0.0.1:6666,127.0.0.1:7777 mds.listen.addr=127.0.0.1:6666 -# 向mds注册的最大重试次数 +# Maximum retry count for registering with MDS mds.register_retries=100 -# 向mds注册的rpc超时时间,一般1000ms +# RPC timeout for MDS registration, typically 1000ms mds.register_timeout=1000 -# 向mds发送心跳的间隔,一般10s +# Interval for sending heartbeats to MDS, usually 10s mds.heartbeat_interval=10 -# 向mds发送心跳的rpc超时间,一般1000ms +# RPC timeout for sending heartbeats to MDS, typically 1000ms mds.heartbeat_timeout=5000 # # Chunkserver settings # -# chunkserver主目录 +# Main directory for chunkserver chunkserver.stor_uri=local://./0/ -# chunkserver元数据文件 +# Metadata file for chunkserver chunkserver.meta_uri=local://./0/chunkserver.dat -# disk类型 +# Disk type chunkserver.disk_type=nvme -# raft内部install snapshot带宽上限,一般20MB +# Raft internal install snapshot bandwidth limit, usually 20MB chunkserver.snapshot_throttle_throughput_bytes=20971520 -# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, -# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 -# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 -# 不是20MB的带宽 +# Throttle check cycles are for finer-grained bandwidth control. For example, +# with snapshotThroughputBytes=100MB and check cycles=10, it ensures that +# the bandwidth is 10MB every 1/10 second, without accumulation. For instance, +# the bandwidth is 10MB for the first 1/10 second, but it expires after that. +# In the second 1/10 second, the bandwidth remains 10MB, not 20MB. chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit for the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -64,41 +65,41 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0 # # Copyset settings # -# 是否检查任期,一般检查 +# Whether to check the term, usually checked copyset.check_term=true -# 是否关闭raft配置变更的服务,一般不关闭 +# Whether to disable the service for raft configuration changes, generally not disabled copyset.disable_cli=false copyset.log_applied_task=false -# raft选举超时时间,一般是5000ms +# Raft election timeout, usually 5000ms copyset.election_timeout_ms=1000 -# raft打快照间隔,一般是1800s,也就是30分钟 +# Raft snapshot interval, usually 1800s, i.e., 30 minutes copyset.snapshot_interval_s=1800 -# add一个节点,add的节点首先以类似learner的角色拷贝数据 -# 在跟leader差距catchup_margin个entry的时候,leader -# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定 -# 会commit&apply,catchup_margin较小可以大概率确保learner -# 后续很快可以加入复制组 +# When adding a node, the added node first copies data in a role similar to a learner. +# When there is a difference of catchup_margin entries from the leader, the leader +# will attempt to commit and apply the configuration change entry (usually the committed +# entry will certainly be committed and applied). A smaller catchup_margin can ensure +# that the learner can quickly join the replication group. 
copyset.catchup_margin=1000 -# copyset chunk数据目录 +# Copyset chunk data directory copyset.chunk_data_uri=local://./0/copysets -# raft wal log目录 +# Raft WAL log directory copyset.raft_log_uri=curve://./0/copysets -# raft元数据目录 +# Raft metadata directory copyset.raft_meta_uri=local://./0/copysets -# raft snapshot目录 +# Raft snapshot directory copyset.raft_snapshot_uri=curve://./0/copysets -# copyset回收目录 +# Copyset recycling directory copyset.recycler_uri=local://./0/recycler -# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制 +# When the chunk server starts, the threshold for concurrent loading of copysets, set to 0 to indicate no limitation. copyset.load_concurrency=10 -# chunkserver use how many threads to use copyset complete sync. +# Number of threads used by chunk server for copyset complete synchronization. copyset.sync_concurrency=20 -# 检查copyset是否加载完成出现异常时的最大重试次数 +# Maximum retry times when checking for exceptions during copyset loading. copyset.check_retrytimes=3 -# 当前peer的applied_index与leader上的committed_index差距小于该值 -# 则判定copyset已经加载完成 +# If the difference between the applied_index of the current peer and the committed_index +# on the leader is less than this value, the copyset is considered loaded. copyset.finishload_margin=2000 -# 循环判定copyset是否加载完成的内部睡眠时间 +# Internal sleep time for cyclically determining if the copyset is loaded. copyset.check_loadmargin_interval_ms=1000 # scan copyset interval copyset.scan_interval_sec=5 @@ -124,26 +125,26 @@ copyset.check_syncing_interval_ms=500 # # Clone settings # -# 禁止使用curveclient +# Prohibit the use of curveclient clone.disable_curve_client=false -# 禁止使用s3adapter +# Prohibit the use of s3adapter clone.disable_s3_adapter=false -# 克隆的分片大小,一般1MB +# The shard size of the clone, usually 1MB clone.slice_size=1048576 -# 读clone chunk时是否需要paste到本地 -# 该配置对recover chunk请求类型无效 +# Do I need to paste to the local location when reading the clone chunk +# This configuration is not valid for the recover chunk request type clone.enable_paste=false -# 克隆的线程数量 +# Number of cloned threads clone.thread_num=10 -# 克隆的队列深度 +# Queue depth for cloning clone.queue_depth=6000 -# curve用户名 +# Curve username curve.root_username=root -# curve密码 +# Curve password curve.root_password=root_password -# client配置文件 +# Client configuration file curve.config_path=conf/cs_client.conf -# s3配置文件 +# S3 configuration file s3.config_path=conf/s3.conf # Curve File time to live curve.curve_file_timeout_s=30 @@ -151,7 +152,7 @@ curve.curve_file_timeout_s=30 # # Local FileSystem settings # -# 是否开启使用renameat2,ext4内核3.15以后开始支持 +# Whether to enable the use of renameat2, ext4 kernel support starting from 3.15 onwards fs.enable_renameat2=true # @@ -171,27 +172,27 @@ storeng.sync_write=false # # Concurrent apply module -# 并发模块写线程的并发度,一般是10 +# The concurrency of concurrent module writing threads is generally 10 wconcurrentapply.size=10 -# 并发模块写线程的队列深度 +# Queue depth of concurrent module write threads wconcurrentapply.queuedepth=1 -# 并发模块读线程的并发度,一般是5 +# The concurrency of concurrent module read threads is generally 5 rconcurrentapply.size=5 -# 并发模块读线程的队列深度 +# Queue depth of concurrent module read threads rconcurrentapply.queuedepth=1 # # Chunkfile pool # -# 是否开启从chunkfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from chunkfilepool, usually true chunkfilepool.enable_get_chunk_from_pool=true -# chunkfilepool目录 +# chunkfilepool directory chunkfilepool.chunk_file_pool_dir=./0/ -# chunkfilepool meta文件路径 +# chunkfilepool meta file path #chunkfilepool.meta_path=./chunkfilepool.meta -# chunkfilepool 
meta文件大小 +# chunkfilepool meta file size chunkfilepool.cpmeta_file_size=4096 -# chunkfilepool get chunk最大重试次数 +# chunkfilepool get chunk maximum retry count chunkfilepool.retry_times=5 # Enable clean chunk chunkfilepool.clean.enable=true @@ -203,36 +204,36 @@ chunkfilepool.clean.throttle_iops=500 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效 +# Does walpool share chunkfilepool? If true, the configuration is invalid starting from the third entry walfilepool.use_chunk_file_pool=true -# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间 +# Enable when WALpool and ChunkFilePool are shared, and reserve space for WALpool during capacity allocation walfilepool.use_chunk_file_pool_reserve=15 -# 是否开启从walfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from walfilepool, usually true walfilepool.enable_get_segment_from_pool=true -# walpool目录 +# Walpool directory walfilepool.file_pool_dir=./0/ -# walpool meta文件路径 +# Walpool Meta File Path walfilepool.meta_path=./walfilepool.meta -# walpool meta文件大小 +# Walpool Meta File Size walfilepool.segment_size=8388608 -# WAL metapage大小 +# WAL metapage size walfilepool.metapage_size=4096 -# WAL filepool 元数据文件大小 +# WAL filepool metadata file size walfilepool.meta_file_size=4096 -# WAL filepool get chunk最大重试次数 +# WAL filepool get chunk maximum retry count walfilepool.retry_times=5 # # trash settings # -# chunkserver回收数据彻底删除的过期时间 +# The expiration time for chunkserver to completely delete data for recycling trash.expire_afterSec=300 -# chunkserver检查回收数据过期时间的周期 +# Chunkserver checks the cycle of recycling data expiration time trash.scan_periodSec=120 # common option # -# chunkserver 日志存放文件夹 +# Chunkserver log storage folder chunkserver.common.logDir=./ -# 单元测试情况下 +# In the case of unit testing # chunkserver.common.logDir=./runlog/ diff --git a/conf/client.conf b/conf/client.conf index 144bf5ff47..65bf3e5a1c 100644 --- a/conf/client.conf +++ b/conf/client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +################### MDS side configuration information################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters mds.listen.addr=127.0.0.1:6666 -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS=true -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,120 +36,120 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ +################# Metacache Configuration Information################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的重试次数 +# Retrieve the number of retries for the leader 
metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# The get-leader interface sleeps for a while before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +###############Configuration information of the scheduling layer############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, one queue per file +# The queue depth affects the overall client throughput; this queue holds asynchronous IO tasks. schedule.queueCapacity=1000000 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads for the queue +# An execution thread simply takes an IO task off the queue, sends it to the network, and returns for the next task. From being taken +# off the queue to finishing the RPC send, a task takes roughly 20us-100us; 20us is the normal case when the leader does not need to be fetched +# If the leader has to be fetched during sending, it takes around 100us, and one thread can handle roughly 100k-500k tasks per second +# This performance already meets the requirements schedule.threadpoolSize=2 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# Task queue introduced to isolate the QEMU-side thread, since there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, the call is simply pushed onto the task queue and returns immediately, +# so libcurve does not occupy QEMU's thread or block its asynchronous calls isolation.taskQueueCapacity=1000000 -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# Size of the thread pool for the QEMU-isolation task queue, 1 thread by default isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################Configuration related to communication with chunkserver############# # -# 读写接口失败的OP之间重试睡眠 +# Sleep between retries of failed read/write OPs chunkserver.opRetryIntervalUS=100000 -# 失败的OP重试次数 +# Number of retries for a failed OP chunkserver.opMaxRetry=2500000 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retried requests +# The sleep time needs to grow when the network is congested or the chunkserver is overloaded, +# and it is capped at maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff policy +# The RPC timeout needs to grow when timeouts are caused by network congestion, +# and it is capped at maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check is triggered; if the check fails, the chunkserver is marked as unstable chunkserver.maxStableTimeoutTimes=10 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# Timeout of the health check request after consecutive RPC timeouts on a chunkserver 
chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# Once the number of unstable chunkservers on the same server exceeds this value, +# all chunkservers on that server are marked as unstable chunkserver.serverStableThreshold=3 -# 当底层chunkserver压力大时,可能也会触发unstable -# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under heavy load, it may also be marked as unstable +# Since the copyset leader may change, the request timeout would be reset to the default value, which can lead to IO hangs +# If the chunkserver is really down, the request completes after a certain number of retries +# If retries keep going on, it is not a crash, and the timeout should still follow the exponential backoff logic +# So once the retry count of a request exceeds this value, its timeout always enters exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff=5 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC has been retried more than maxRetryTimesBeforeConsiderSuspend times, +# it is recorded as suspended IO and the metric raises an alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 # -################# 文件级别配置项 ############# +#################File level configuration items############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Maximum number of outstanding RPCs allowed by libcurve's underlying RPC scheduling; each file's inflight RPCs are counted independently global.fileMaxInFlightRPCNum=128 -# 文件IO下发到底层chunkserver最大的分片KB +# Maximum split size in KB when file IO is dispatched to the underlying chunkserver global.fileIOSplitMaxSizeKB=64 # -################# log相关配置 ############### +################# Log related configuration############### # # enable logging or not global.logging.enable=True # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the log path global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -################# 读源卷相关配置 ############### +################# Read source volume related configurations############### # -# 读取源卷时打开的fd超时关闭时间300s +# Timeout (300s) after which an fd opened for reading the source volume is closed closefd.timeout=300 -# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd +# A background thread scans the fdMap every 600s and closes timed-out fds opened for reading the source volume closefd.timeInterval=600 # -############### metric 配置信息 ############# +############### Metric configuration information############# # global.metricDummyServerStartPort=9000 -# 是否关闭健康检查: true/关闭 false/不关闭 +# Whether to disable the health check: true = disable, false = keep enabled global.turnOffHealthCheck=true # diff --git a/conf/cs_client.conf b/conf/cs_client.conf index 56825bd2df..37e89fd959 100644 --- a/conf/cs_client.conf +++ b/conf/cs_client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +################### mds side configuration information ################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__ -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on 
mds.registerToMDS=false -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,116 +36,116 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ +################# metacache Configuration Information ################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的backup request超时时间 +# Obtain the backup request timeout for the leader metacache.getLeaderBackupRequestMS=100 -# getleaer backup request使用的load balancer方法 +# The load balancer method used by getleaer backup request metacache.getLeaderBackupRequestLbName=rr -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Obtaining the leader interface requires a period of sleep before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +###############Configuration information of the scheduling layer############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.. schedule.queueCapacity=1000000 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads for the queue +# The task of execution threads is to fetch IO and then send it over the network before moving on to the next network task. +# The time taken for a task, from retrieval from the queue to sending the RPC request, is typically between 20 microseconds to 100 microseconds. 20 microseconds is the normal case when leader acquisition is not needed during the send operation. +# If leader acquisition is required during sending, the time can be around 100 microseconds. The throughput of one thread ranges from 100,000 to 500,000 operations per second. +# The performance meets the requirements. 
schedule.threadpoolSize=1 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity=1000000 -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# The size of the task queue thread pool for isolating QEMU threads, with a default value of 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################Configuration related to communication with chunkserver############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS=100000 -# 失败的OP重试次数 +#Number of failed OP retries chunkserver.opMaxRetry=3 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes=64 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# All chunkservers will be marked as unstable chunkserver.serverStableThreshold=3 -# 当底层chunkserver压力大时,可能也会触发unstable -# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under high pressure, unstable may also be triggered +# Due to copyset leader may change, the request timeout time will be set to the default value, resulting in IO hang +# In the case of real downtime, the request will be processed after a certain number of retries +# If you keep trying again, it's not a downtime situation, and at this point, the timeout still needs to enter the exponential backoff logic +# When the number of retries for a request exceeds this value, its timeout must enter exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff=5 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC retry exceeds maxRetryTimesBeforeConsiderSuspend +# Record as suspended IO, metric will alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 # -################# 文件级别配置项 
############# +#################File level configuration items############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Libcurve allows for the maximum number of unreturned rpcs in the underlying rpc scheduling, with each file's inflight RPC being independent global.fileMaxInFlightRPCNum=64 -# 文件IO下发到底层chunkserver最大的分片KB +# The maximum sharding KB for file IO distribution to the underlying chunkserver global.fileIOSplitMaxSizeKB=64 # -################# log相关配置 ############### +################# Log related configuration############### # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -############### metric 配置信息 ############# +############### metric configuration information ############# # global.metricDummyServerStartPort=9000 # -# session map文件,存储打开文件的filename到path的映射 +# session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath=./session_map.json diff --git a/conf/mds.conf b/conf/mds.conf index 1e1a7eb273..95d7eca3bb 100644 --- a/conf/mds.conf +++ b/conf/mds.conf @@ -1,5 +1,5 @@ # -# mds服务端口 +# Mds service port # mds.listen.addr=127.0.0.1:6666 #__CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__ mds.dummy.listen.port=6667 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__ @@ -7,15 +7,15 @@ global.subnet=127.0.0.0/24 global.port=6666 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__ # -# etcd相关配置 +# ETCD related configurations # -# etcd地址 +# ETCD address mds.etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__ -# client建立连接的超时时间 +# The timeout period for establishing a connection with a client mds.etcd.dailtimeoutMs=5000 -# client进行put/get/txn等操作的超时时间 +# The timeout period for client to perform put/get/txn and other operations mds.etcd.operation.timeoutMs=5000 -# client操作失败可以重试的次数 +# The number of times a client operation failed and can be retried mds.etcd.retry.times=3 # wait dlock timeout mds.etcd.dlock.timeoutMs=10000 @@ -23,68 +23,68 @@ mds.etcd.dlock.timeoutMs=10000 mds.etcd.dlock.ttlSec=10 # -# segment分配量统计相关配置 +# Configuration related to segment allocation statistics # -# 将内存中的数据持久化到etcd的间隔, 单位ms +# The interval between persisting data in memory to ETCD, in milliseconds mds.segment.alloc.periodic.persistInterMs=10000 -# 出错情况下的重试间隔,单位ms +# The retry interval in ms in case of an error mds.segment.alloc.retryInterMs=1000 mds.segment.discard.scanIntevalMs=5000 -# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s) -# 该值和etcd集群election timeout相关. -# etcd的server端限制了该值最小为1.5 * election timeout -# 建议设置etcd集群election timeout为1s +# During the leader election, a session is created in seconds (the unit of the value for the interface of the go code is s) +# This value is related to the ETCD cluster selection timeout +# The server side of ETCD limits this value to a minimum of 1.5 * election timeout +# Suggest setting the ETCD cluster selection timeout to 1 second mds.leader.sessionInterSec=5 -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误 +# The timeout period for the leader election. If it is 0 and the election is unsuccessful, it will continue to block. 
If it is greater than 0, an error is returned +# if no leader is elected within electionTimeoutMs mds.leader.electionTimeoutMs=0 # -# scheduler相关配置 +# Scheduler related configurations # -# copysetScheduler开关 +# copysetScheduler switch mds.enable.copyset.scheduler=true -# leaderScheduler开关 +# leaderScheduler switch mds.enable.leader.scheduler=true -# recoverScheduler开关 +# recoverScheduler switch mds.enable.recover.scheduler=true -# replicaScheduler开关 +# replicaScheduler switch mds.enable.replica.scheduler=true # Scan scheduler switch mds.enable.scan.scheduler=true -# copysetScheduler 轮次间隔,单位是s +# copysetScheduler round interval, in seconds mds.copyset.scheduler.intervalSec=5 -# replicaScheduler 轮次间隔,单位是s +# replicaScheduler round interval, in seconds mds.replica.scheduler.intervalSec=5 -# leaderScheduler 轮次间隔,单位是s +# leaderScheduler round interval, in seconds mds.leader.scheduler.intervalSec=30 -# recoverScheduler 轮次间隔,单位是s +# recoverScheduler round interval, in seconds mds.recover.scheduler.intervalSec=5 # Scan scheduler run interval (seconds) mds.scan.scheduler.intervalSec=60 -# 每块磁盘上operator的并发度 +# Concurrency of operators on each disk mds.schduler.operator.concurrent=1 -# leader变更超时时间, 超时后mds从内存移除该operator +# Timeout for a leader transfer; after it expires, mds removes the operator from memory mds.schduler.transfer.limitSec=60 -# 减一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for removing a replica; after it expires, mds removes the operator from memory mds.scheduler.remove.limitSec=300 -# 增加一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for adding a replica; after it expires, mds removes the operator from memory mds.scheduler.add.limitSec=1800 -# change一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for changing a replica; after it expires, mds removes the operator from memory mds.scheduler.change.limitSec=1800 # Scan operator timeout (seconds) mds.scheduler.scan.limitSec=180 -# copyset数量极差不能超过均值的百分比 +# The range (max minus min) of copyset counts must not exceed this percentage of the mean mds.scheduler.copysetNumRangePercent=0.05 -# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比 +# The range of copyset scatter-width across chunkservers must not exceed this percentage of the minimum value mds.schduler.scatterWidthRangePerent=0.2 -# 一个server上超过一定数量的chunkserver offline, 不做恢复 +# If more than this number of chunkservers on one server are offline, no recovery is performed mds.chunkserver.failure.tolerance=3 -# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s -# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关 +# A chunkserver can be chosen as a target leader only after it has been up for coolingTimeSec_ seconds +# TODO(lixiaocui): this should be correlated to some extent with the snapshot interval mds.scheduler.chunkserver.cooling.timeSec=1800 # ScanScheduler: scan start hour in one day ([0-23]) mds.scheduler.scan.startHour=0 @@ -98,104 +98,104 @@ mds.scheduler.scan.concurrent.per.pool=10 mds.scheduler.scan.concurrent.per.chunkserver=1 # -# 心跳相关配置,单位为ms +# Heartbeat related configuration, in ms # -# chunkserver和mds的心跳间隔 +# Heartbeat interval between chunkserver and mds mds.heartbeat.intervalMs=10000 -# chunkserver和mds间心跳miss的时间 +# Time after which a heartbeat between chunkserver and mds is considered missed mds.heartbeat.misstimeoutMs=30000 -# mds在心跳miss后offlinetimeout被标记为offline +# A chunkserver is marked offline after its heartbeat has been missing for offlinetimeout mds.heartbeat.offlinetimeoutMs=1800000 -# mds启动后延迟一定时间开始指导chunkserver删除物理数据 -# 需要延迟删除的原因在代码中备注 +# After starting the mds, delay
for a certain period of time to guide chunkserver in deleting physical data +# The reason for delayed deletion is noted in the code mds.heartbeat.clean_follower_afterMs=1200000 # -# namespace cache相关 +# Namespace cache related # -# namestorage的缓存大小,为0表示不缓存 -# 按照每个文件最小10GB的空间预算。算上超售(2倍) -# 文件数量 = 5PB/10GB ~= 524288 个文件 -# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间 -# 16MB chunk大小, 1个segment 1GB -# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB -# 数据量:3GB左右 -# 记录数量:524288+2621440 ~= 300w左右 +# The cache size of namestorage, where 0 indicates no caching +# Based on a minimum space budget of 10GB per file. Including oversold (2x) +# Number of files=5PB/10GB ~= 524288 files +# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB space +# 16MB chunk size, 1 segment 1GB +# sizeof(segment object) * 2621440 ~= (32+(1024/16)*12) * 2621440~=1.95 GB +# Data volume: about 3GB +# Record quantity: 524288+2621440 ~= about 300w mds.cache.count=100000 # # mds file record settings # -# mds file记录过期时间,单位us +# Mds file records expiration time, in units of us mds.file.expiredTimeUs=5000000 -# mds后台扫描线程扫描file记录间隔时间,单位us +# MDS backend scanning thread scanning file record interval time, unit: us mds.file.scanIntevalTimeUs=500000 # # auth settings # -# root用户密码 +# Root User Password mds.auth.rootUserName=root mds.auth.rootPassword=root_password # # file lock setting # -# mds的文件锁桶大小 +# File lock bucket size for mds mds.filelock.bucketNum=8 # # topology config # -# Toplogy 定期刷新入数据库的时间间隔 +# The time interval for Toplogy to periodically refresh into the database mds.topology.TopologyUpdateToRepoSec=60 -# 请求chunkserver上创建全部copyset的超时时间 +# Request timeout for creating all copysets on chunkserver mds.topology.CreateCopysetRpcTimeoutMs=10000 -# 请求chunkserver上创建copyset重试次数 +# Request to create copyset on chunkserver retry count mds.topology.CreateCopysetRpcRetryTimes=20 -# 请求chunkserver上创建copyset重试间隔 +# Request to create copyset on chunkserver retry interval mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000 -# Topology模块刷新metric时间间隔 +# Topology module refresh metric interval mds.topology.UpdateMetricIntervalSec=10 -#和mds.chunkserver.failure.tolerance设置有关,一个zone 标准配置20台节点,如果允许3台节点failover, -#那么剩余17台机器需要承载原先20台机器的空间,17/20=0.85,即使用量超过这个值即不再往这个池分配, -#具体分为来两种情况, 当不使用chunkfilepool,物理池限制使用百分比,当使用 chunkfilepool 进行chunkfilepool分配时需预留failover空间, +# It is related to the settings of mds.chunkserver.failure.tolerance. A standard configuration for a zone is 20 nodes, and if 3 nodes are allowed to fail over, +# So the remaining 17 machines need to carry the space of the original 20 machines, 17/20=0.85. 
Even if the usage exceeds this value, they will no longer be allocated to this pool, +# There are two specific situations: when chunkfilepool is not used, the physical pool limits the percentage of usage, and when chunkfilepool is used for chunkfilepool allocation, it is necessary to reserve failover space, mds.topology.PoolUsagePercentLimit=85 -# 多pool选pool策略 0:Random, 1:Weight +# Multi pool selection pool strategy 0:Random, 1:Weight mds.topology.choosePoolPolicy=0 # enable LogicalPool ALLOW/DENY status mds.topology.enableLogicalPoolStatus=false # # copyset config -# 默认值,为0时不启用 +# Default value, not enabled when 0 # -# 生成copyset重试次数 +#Generate copyset retry count mds.copyset.copysetRetryTimes=10 -# 所有chunkserver的scatterWidth需满足的最大方差 +# The maximum variance that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthVariance=0 -# 所有chunkserver的scatterWidth需满足的最大标准差 +# The maximum standard deviation that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthStandardDevation=0 -# 所有chunkserver的scatterWidth需满足的最大极差 +# The maximum range that the scatterWidth of all chunkservers needs to meet mds.copyset.scatterWidthRange=0 -# 所有chunkserver的scatterWidth偏离均值的百分比 -# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复 -# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的 -# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点 -# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高, -# 导致算法可能算不出理想结果,建议设置值为20 +# Percentage of deviation from the mean scatterWidth of all chunk servers. +# Setting a large percentage deviation for scatterWidth can result in some machines having scatterWidth values that are too small, affecting machine recovery time and reducing cluster reliability. +# Additionally, it can lead to some machines having excessively large scatterWidth values, causing certain chunk server's copysets to be scattered across various machines. +# Once data is written to these servers, the ones with larger scatterWidth become hotspots. Setting the percentage deviation for scatterWidth too small requires a higher level of scatterWidth +# uniformity and copyset algorithm precision, potentially resulting in suboptimal algorithm results. +# It is recommended to set the value to 20. 
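For a concrete sense of the deviation bound above: if the average scatter-width across chunkservers were 100 (an illustrative figure, not a default), a 20 percent band keeps every chunkserver roughly within [80, 120]; widening the band risks the slow-recovery and hotspot effects described above, while narrowing it may leave the copyset placement algorithm with no feasible solution.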
mds.copyset.scatterWidthFloatingPercentage=20 # # curvefs config # -# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216 +# The default chunk size for curvefs is 16MB = 16*1024*1024 = 16777216 mds.curvefs.defaultChunkSize=16777216 -# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824 +# The default segment size for curves is 1GB = 1*1024*1024*1024 = 1073741824 mds.curvefs.defaultSegmentSize=1073741824 -# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240 +# The default minimum file size for curvefs is 10GB = 10*1024*1024*1024 = 10737418240 mds.curvefs.minFileLength=10737418240 -# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520 +# The default maximum file size for curvefs is 20TB = 20*1024*1024*1024*1024 = 21990232555520 mds.curvefs.maxFileLength=21990232555520 # smallest read/write unit for volume, support |512| and |4096| mds.curvefs.blockSize=4096 @@ -203,29 +203,29 @@ mds.curvefs.blockSize=4096 # # chunkseverclient config # -# rpc 超时时间 +# RPC timeout mds.chunkserverclient.rpcTimeoutMs=500 -# rpc 重试次数 +# RPC retry count mds.chunkserverclient.rpcRetryTimes=5 -# rpc 重试时间间隔 +# RPC retry interval mds.chunkserverclient.rpcRetryIntervalMs=500 -# 从copyset的每个chunkserver getleader的重试的最大轮次 +# The maximum number of retries from each chunkserver getleader in the copyset mds.chunkserverclient.updateLeaderRetryTimes=5 -# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间 +# The interval between each round of each chunkserver getleader in the copyset must be greater than the time for selecting the master in the raft mds.chunkserverclient.updateLeaderRetryIntervalMs=5000 # # snapshotclone config # -# snapshot clone server 地址 +# snapshot clone server address mds.snapshotcloneclient.addr=127.0.0.1:5555 # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_proxy_addr} __CURVEADM_TEMPLATE__ # # common options # -# 日志存放文件夹 +# Log storage folder mds.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# In the case of unit testing # mds.common.logDir=./runlog/ # diff --git a/conf/py_client.conf b/conf/py_client.conf index ebcd342adf..de9d55603f 100644 --- a/conf/py_client.conf +++ b/conf/py_client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +################### MDS side configuration information################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters mds.listen.addr=127.0.0.1:6666 -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS=false -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,110 +36,110 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ 
+################# Metacache Configuration Information ################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Obtaining the leader interface requires a period of sleep before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +###############Configuration information of the scheduling layer############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.. schedule.queueCapacity=1000000 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads in the queue +# What the executing thread needs to do is to retrieve the IO, then send it to the network and return to retrieve the next network task. A task starts from +# The RPC request is approximately (20us-100us) from the time the queue is retrieved to the time it is sent, and 20us is the normal time when it is not necessary to obtain a leader +# If a leader needs to be obtained during sending, the time will be around 100us, and the throughput of one thread will be between 10w-50w +# The performance has met the requirements schedule.threadpoolSize=1 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity=1000000 -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# The size of the task queue thread pool for isolating QEMU threads, with a default value of 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Configuration related to communication with chunkserver ############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS=100000 -# 失败的OP重试次数 +# Number of failed OP retries chunkserver.opMaxRetry=2500000 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be 
conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes=10 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# All chunkservers will be marked as unstable chunkserver.serverStableThreshold=3 -# 当底层chunkserver压力大时,可能也会触发unstable -# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under high pressure, unstable may also be triggered +# Due to copyset leader may change, the request timeout time will be set to the default value, resulting in IO hang +# In the case of real downtime, the request will be processed after a certain number of retries +# If you keep trying again, it's not a downtime situation, and at this point, the timeout still needs to enter the exponential backoff logic +# When the number of retries for a request exceeds this value, its timeout must enter exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff=5 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC retry exceeds maxRetryTimesBeforeConsiderSuspend +# Record as suspended IO, metric will alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 # -################# 文件级别配置项 ############# +################# File level configuration items ############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# libcurve allows for the maximum number of unreturned rpcs in the underlying rpc scheduling, with each file's inflight RPC being independent global.fileMaxInFlightRPCNum=64 -# 文件IO下发到底层chunkserver最大的分片KB +# The maximum sharding KB for file IO distribution to the underlying chunkserver global.fileIOSplitMaxSizeKB=64 # -################# log相关配置 ############### +################# Log related configuration############### # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=/data/log/curve/ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -############### metric 配置信息 ############# +############### Metric configuration information############# # global.metricDummyServerStartPort=10000 # -# session map文件,存储打开文件的filename到path的映射 +# Session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath=./session_map.json diff --git a/conf/snap_client.conf b/conf/snap_client.conf index a004cb4233..bbea2d4e44 100644 --- a/conf/snap_client.conf +++ b/conf/snap_client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +###################MDS side configuration information################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__ -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS=false -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 
与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,116 +36,116 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ +#################Metacache Configuration Information################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的backup request超时时间 +# Obtain the backup request timeout for the leader metacache.getLeaderBackupRequestMS=100 -# getleaer backup request使用的load balancer方法 +# The load balancer method used by getleaer backup request metacache.getLeaderBackupRequestLbName=rr -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Obtaining the leader interface requires a period of sleep before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +############### Configuration information of the scheduling layer ############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.. schedule.queueCapacity=1000000 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads in the queue +# What the executing thread needs to do is to retrieve the IO, then send it to the network and return to retrieve the next network task. 
A task starts from +# The RPC request is approximately (20us-100us) from the time the queue is retrieved to the time it is sent, and 20us is the normal time when it is not necessary to obtain a leader +# If a leader needs to be obtained during sending, the time will be around 100us, and the throughput of one thread will be between 10w-50w +# The performance has met the requirements schedule.threadpoolSize=1 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity=1000000 -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# The size of the task queue thread pool for isolating QEMU threads, with a default value of 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Configuration related to communication with chunkserver ############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS=100000 -# 失败的OP重试次数 +# Number of failed OP retries chunkserver.opMaxRetry=50 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS=16000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes=64 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +#After the number of unstable chunkservers on the same server exceeds this value +#All chunkservers will be marked as unstable chunkserver.serverStableThreshold=3 -# 当底层chunkserver压力大时,可能也会触发unstable -# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under high pressure, unstable may also be triggered +# Due to copyset leader may change, the request timeout time will be set to the default value, resulting in IO hang +# In the case of real downtime, the request will be processed after a certain number of retries +# If you keep trying again, it's not a downtime situation, and at this point, the timeout still needs to enter the exponential backoff logic +# When the number of 
retries for a request exceeds this value, its timeout must enter exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff=5 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC retry exceeds maxRetryTimesBeforeConsiderSuspend +#Record as suspended IO, metric will alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 # -################# 文件级别配置项 ############# +################# File level configuration items ############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# libcurve allows for the maximum number of unreturned rpcs in the underlying rpc scheduling, with each file's inflight RPC being independent global.fileMaxInFlightRPCNum=64 -# 文件IO下发到底层chunkserver最大的分片KB +# The maximum sharding KB for file IO distribution to the underlying chunkserver global.fileIOSplitMaxSizeKB=64 # -################# log相关配置 ############### +################# Log related configuration ############### # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -############### metric 配置信息 ############# +############### metric configuration information ############# # global.metricDummyServerStartPort=9000 # -# session map文件,存储打开文件的filename到path的映射 +# session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath=./session_map.json diff --git a/conf/snapshot_clone_server.conf b/conf/snapshot_clone_server.conf index d4fdf2b64c..01d2ca9158 100644 --- a/conf/snapshot_clone_server.conf +++ b/conf/snapshot_clone_server.conf @@ -1,18 +1,18 @@ # # curvefs client options # -# client配置文件位置 +# Client configuration file location client.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/snap_client.conf __CURVEADM_TEMPLATE__ -# mds root 用户名 +# Mds root username mds.rootUser=root -# mds root 密码 +# Mds root password mds.rootPassword=root_password -# 调用client方法的重试总时间 +# The total retry time for calling the client method client.methodRetryTimeSec=300 -# 调用client方法重试间隔时间 +# Call client method retry interval client.methodRetryIntervalMs=5000 -# 日志文件位置 +# Log file location log.dir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ # @@ -26,61 +26,61 @@ s3.config_path=./conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __ server.address=127.0.0.1:5556 # __CURVEADM_TEMPLATE__ ${service_addr}:${service_port} __CURVEADM_TEMPLATE__ server.subnet=127.0.0.0/24 server.port=5556 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__ -# 调用client异步方法重试总时间 +# Total retry time for calling client asynchronous methods server.clientAsyncMethodRetryTimeSec=300 -# 调用client异步方法重试时间间隔 +# Call client asynchronous method retry interval server.clientAsyncMethodRetryIntervalMs=5000 -# 快照工作线程数 +# Number of snapshot worker threads server.snapshotPoolThreadNum=256 -# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) +# Scanning cycle of snapshot background thread scanning waiting queue and work queue (unit: ms) server.snapshotTaskManagerScanIntervalMs=1000 -# 转储chunk分片大小 +# Dump chunk shard size # for nos, pls set to 1048576 server.chunkSplitSize=8388608 -# CheckSnapShotStatus调用间隔 +# CheckSnapShotStatus call interval server.checkSnapshotStatusIntervalMs=1000 -# 最大快照数 +# Maximum Snapshots server.maxSnapshotLimit=1024 -# 同时执行转储的线程数 +# Number of threads simultaneously executing dump 
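For scale, with the default 16 MB chunk (mds.curvefs.defaultChunkSize=16777216), the server.chunkSplitSize of 8388608 above means each chunk is dumped in two 8 MB slices, while the 1048576 value suggested for NOS yields sixteen 1 MB slices per chunk.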
server.snapshotCoreThreadNum=64 -# mds session 时间 +# mds session duration server.mdsSessionTimeUs=5000000 -# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量 +# Number of snapshot slices per thread on which ReadChunkSnapshot and dump run concurrently server.readChunkSnapshotConcurrency=16 # for clone -# 用于Lazy克隆元数据部分的线程池线程数 +# Number of thread pool threads used for the metadata stage of lazy clone server.stage1PoolThreadNum=256 -# 用于Lazy克隆数据部分的线程池线程数 +# Number of thread pool threads used for the data stage of lazy clone server.stage2PoolThreadNum=256 -# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数 +# Number of thread pool threads used for other control-plane requests such as non-lazy clone and clone deletion server.commonPoolThreadNum=256 -# CloneTaskManager 后台线程扫描间隔 +# CloneTaskManager background thread scan interval server.cloneTaskManagerScanIntervalMs=1000 -# clone chunk分片大小 +# Clone chunk shard size # for nos, pls set to 65536 server.cloneChunkSplitSize=1048576 -# 克隆临时目录 +# Clone temporary directory server.cloneTempDir=/clone -# CreateCloneChunk同时进行的异步请求数量 +# Number of asynchronous requests made simultaneously by CreateCloneChunk server.createCloneChunkConcurrency=64 -# RecoverChunk同时进行的异步请求数量 +# Number of asynchronous requests made simultaneously by RecoverChunk server.recoverChunkConcurrency=64 -# CloneServiceManager引用计数后台扫描每条记录间隔 +# CloneServiceManager reference counting background scan: interval between records server.backEndReferenceRecordScanIntervalMs=500 -# CloneServiceManager引用计数后台扫描每轮记录间隔 +# CloneServiceManager reference counting background scan: interval between rounds server.backEndReferenceFuncScanIntervalMs=3600000 # -# etcd相关配置 +# ETCD related configurations # -# etcd地址 +# ETCD address etcd.endpoint=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__ -# client建立连接的超时时间 +# Timeout for the etcd client to establish a connection (dial timeout) etcd.dailtimeoutMs=5000 -# client进行put/get/txn等操作的超时时间 +# Timeout for the client to perform put/get/txn and other operations etcd.operation.timeoutMs=5000 -# client操作失败可以重试的次数 +# Number of times a failed client operation can be retried etcd.retry.times=3 # wait dlock timeout etcd.dlock.timeoutMs=10000 @@ -88,20 +88,20 @@ etcd.dlock.timeoutMs=10000 etcd.dlock.ttlSec=10 # -# leader选举相关参数 +# Leader election related parameters # -# leader lock名称 +# Leader lock name leader.campagin.prefix=snapshotcloneserverleaderlock -# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s) -# 该值和etcd集群election timeout相关. -# etcd的server端限制了该值最小为1.5 * election timeout -# 建议设置etcd集群election timeout为1s +# A session is created during the leader election, in seconds (the go-side interface also takes this value in seconds) +# This value is related to the etcd cluster election timeout +# The etcd server limits this value to a minimum of 1.5 * election timeout +# It is recommended to set the etcd cluster election timeout to 1s leader.session.intersec=5 -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误 +# The timeout period for the leader election. If it is 0 and the election is unsuccessful, it will continue to block.
If it is greater than 0, it will be in the selectionTimeoutMs time +# If a leader is not selected, an error will be returned leader.election.timeoutms=0 # -# dummyserver相关配置 +# Dummyserver related configurations # server.dummy.listen.port=8081 # __CURVEADM_TEMPLATE__ ${service_dummy_port} __CURVEADM_TEMPLATE__ diff --git a/conf/tools.conf b/conf/tools.conf index 545297d92c..42be38e27c 100644 --- a/conf/tools.conf +++ b/conf/tools.conf @@ -1,16 +1,16 @@ -# mds地址 +# Mds address mdsAddr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__ # mds dummy port mdsDummyPort=6700 # __CURVEADM_TEMPLATE__ ${cluster_mds_dummy_port} __CURVEADM_TEMPLATE__ -# 发送rpc的超时时间 +# Time out for sending rpc rpcTimeout=500 -# rpc重试次数 +# RPC retry count rpcRetryTimes=5 # the rpc concurrency to chunkserver rpcConcurrentNum=10 -# etcd地址 +# ETCD address etcdAddr=127.0.0.1:2379 # __CURVEADM_TEMPLATE__ ${cluster_etcd_addr} __CURVEADM_TEMPLATE__ -# snapshot clone server 地址 +# Snapshot clone server address snapshotCloneAddr= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_addr} __CURVEADM_TEMPLATE__ # snapshot clone server dummy port snapshotCloneDummyPort= # __CURVEADM_TEMPLATE__ ${cluster_snapshotclone_dummy_port} __CURVEADM_TEMPLATE__ diff --git a/curve-ansible/client.ini b/curve-ansible/client.ini index 8eacc6270c..ecf308581d 100644 --- a/curve-ansible/client.ini +++ b/curve-ansible/client.ini @@ -1,7 +1,7 @@ [client] localhost ansible_ssh_host=127.0.0.1 -# 仅用于生成配置中的mds地址 +# Only used to generate mds addresses in the configuration [mds] localhost ansible_ssh_host=127.0.0.1 diff --git a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml index 7121b28042..8200229894 100644 --- a/curve-ansible/common_tasks/wait_copysets_status_healthy.yml +++ b/curve-ansible/common_tasks/wait_copysets_status_healthy.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 等待copyset健康,每个一段时间检查一次,一共检查若干次,成功则break,如果一直不健康则报错 +# Wait for the copyset to be healthy, check once every period of time, a total of several times. If successful, it will break, and if it remains unhealthy, an error will be reported - name: check copysets status until healthy shell: curve_ops_tool copysets-status --confPath={{ curve_ops_tool_config }} | grep "{{ defined_copysets_status }}" register: result diff --git a/curve-ansible/group_vars/mds.yml b/curve-ansible/group_vars/mds.yml index f575cb79d5..689b1414eb 100644 --- a/curve-ansible/group_vars/mds.yml +++ b/curve-ansible/group_vars/mds.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 集群拓扑信息 +# Cluster topology information cluster_map: servers: - name: server1 diff --git a/curve-ansible/roles/generate_config/defaults/main.yml b/curve-ansible/roles/generate_config/defaults/main.yml index 4d7dfe5514..36d14e676b 100644 --- a/curve-ansible/roles/generate_config/defaults/main.yml +++ b/curve-ansible/roles/generate_config/defaults/main.yml @@ -15,7 +15,7 @@ # limitations under the License. 
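In practice the copyset health gate used by the playbooks reduces to running the ops tool against the rendered tools.conf, for example curve_ops_tool copysets-status --confPath=/etc/curve/tools.conf (the path shown is an assumed install location), and retrying until the output contains the expected healthy status string.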
# -# 通用配置 +# General configuration curve_root_username: root curve_root_password: root_password curve_file_timeout_s: 30 @@ -25,7 +25,7 @@ min_file_length: 10737418240 max_file_length: 21990232555520 file_expired_time_us: 5000000 -# mds配置默认值 +# Mds configuration default values mds_etcd_dailtimeout_ms: 5000 mds_etcd_operation_timeout_ms: 5000 mds_etcd_retry_times: 3 @@ -94,7 +94,7 @@ throttle_bps_min_in_MB: 120 throttle_bps_max_in_MB: 260 throttle_bps_per_GB_in_MB: 0.3 -# chunkserver配置默认值 +# Chunkserver Configuration Default Values chunkserver_enable_external_server: true chunkserver_meta_page_size: 4096 chunkserver_location_limit: 3000 @@ -165,7 +165,7 @@ chunkserver_trash_expire_after_sec: 300 chunkserver_trash_scan_period_sec: 120 chunkserver_common_log_dir: ./runlog/ -# 快照克隆配置默认值 +# Default values for snapshot clone configuration snap_client_config_path: /etc/curve/snap_client.conf snap_client_method_retry_time_sec: 120 snap_client_method_retry_interval_ms: 5000 @@ -201,7 +201,7 @@ snap_leader_session_inter_sec: 5 snap_leader_election_timeout_ms: 0 snap_nginx_addr: 127.0.0.1:5555 -# client配置默认值 +# Default values for client configuration client_register_to_mds: true client_mds_rpc_timeout_ms: 500 client_mds_max_rpc_timeout_ms: 2000 @@ -244,7 +244,7 @@ client_discard_enable: true client_discard_granularity: 4096 client_discard_task_delay_ms: 60000 -# nebd默认配置 +# Nebd default configuration client_config_path: /etc/curve/client.conf nebd_client_sync_rpc_retry_times: 50 nebd_client_rpc_retry_inverval_us: 100000 @@ -259,7 +259,7 @@ nebd_server_heartbeat_timeout_s: 30 nebd_server_heartbeat_check_interval_ms: 3000 nebd_server_response_return_rpc_when_io_error: false -# s3配置默认值 +# Default values for s3 configuration s3_http_scheme: 0 s3_verify_ssl: false s3_user_agent_conf: S3 Browser @@ -276,15 +276,15 @@ s3_throttle_bpsTotalLimit: 1280 s3_throttle_bpsReadLimit: 1280 s3_throttle_bpsWriteLimit: 1280 -# 运维工具默认值 +# Default values for operation and maintenance tools tool_rpc_timeout: 500 tool_rpc_retry_times: 5 tool_rpc_concurrent_num: 10 -# snapshotclone_nginx配置 +# snapshotclone_nginx configuration nginx_docker_internal_port: 80 -# etcd默认配置 +# ETCD default configuration etcd_snapshot_count: 10000 etcd_heartbeat_interval: 100 etcd_election_timeout: 1000 diff --git a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 index 0e7e65e9cc..ae43478df7 100644 --- a/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/chunkserver.conf.j2 @@ -1,24 +1,24 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip={{ ansible_ssh_host }} global.port={{ chunkserver_base_port }} global.subnet={{ chunkserver_subnet }} global.enable_external_server={{ chunkserver_enable_external_server }} global.external_ip={{ ansible_ssh_host }} global.external_subnet={{ chunkserver_external_subnet }} -# chunk大小,一般16MB +# Chunk size, usually 16MB global.chunk_size={{ chunk_size }} -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, usually 4KB global.meta_page_size={{ chunkserver_meta_page_size }} -# clone chunk允许的最长location长度 +# The maximum allowed location length for clone chunks global.location_limit={{ chunkserver_location_limit }} # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Supports MDS multiple addresses, separated by commas 127.0.0.1:6666,127.0.0.1:7777 {% set mds_address=[] -%} {% for host in 
groups.mds -%} {% set mds_ip = hostvars[host].ansible_ssh_host -%} @@ -26,30 +26,30 @@ global.location_limit={{ chunkserver_location_limit }} {% set _ = mds_address.append("%s:%s" % (mds_ip, mds_port)) -%} {% endfor -%} mds.listen.addr={{ mds_address | join(',') }} -# 向mds注册的最大重试次数 +# Maximum number of retries registered with mds mds.register_retries={{ chunkserver_register_retries }} -# 向mds注册的rpc超时时间,一般1000ms +# RPC timeout for registering with mds, typically 1000ms mds.register_timeout={{ chunkserver_register_timeout }} -# 向mds发送心跳的间隔,一般10s +# The interval between sending heartbeat to MDS, usually 10s mds.heartbeat_interval={{ chunkserver_heartbeat_interval }} -# 向mds发送心跳的rpc超时间,一般1000ms +# Send rpc timeout of heartbeat to mds, usually 1000ms mds.heartbeat_timeout={{ chunkserver_heartbeat_timeout }} # # Chunkserver settings # -# chunkserver主目录 +# Chunkserver home directory chunkserver.stor_uri={{ chunkserver_stor_uri }} -# chunkserver元数据文件 +# Chunkserver metadata file chunkserver.meta_uri={{ chunkserver_meta_uri }} -# disk类型 +# Disk type chunkserver.disk_type={{ chunkserver_disk_type }} -# raft内部install snapshot带宽上限,一般20MB +# Raft internal install snapshot bandwidth limit, usually 20MB chunkserver.snapshot_throttle_throughput_bytes={{ chunkserver_snapshot_throttle_throughput_bytes }} -# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, -# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 -# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 -# 不是20MB的带宽 +# Check cycles are used for more precise bandwidth control, with snapshots ThroughputBytes=100MB, +# Taking check cycles=10 as an example, it can ensure that the bandwidth is 10MB every 1/10 second and does not accumulate, such as the first one +# The bandwidth of 1/10 second is 10MB, but it expires. In the second 1/10 second, only 10MB of bandwidth can be used, and +# Not a bandwidth of 20MB chunkserver.snapshot_throttle_check_cycles={{ chunkserver_snapshot_throttle_check_cycles }} chunkserver.max_inflight_requests={{ chunkserver_max_inflight_requests }} @@ -64,39 +64,39 @@ test.testcopyset_conf={{ chunkserver_test_testcopyset_conf }} # # Copyset settings # -# 是否检查任期,一般检查 +# Whether to check the term of office, general inspection copyset.check_term={{ chunkserver_copyset_check_term }} -# 是否关闭raft配置变更的服务,一般不关闭 +# Do you want to close the service for raft configuration changes? Generally, it is not closed copyset.disable_cli={{ chunkserver_copyset_disable_cli }} copyset.log_applied_task={{ chunkserver_copyset_log_applied_task }} -# raft选举超时时间,一般是5000ms +# Raft election timeout, usually 5000ms copyset.election_timeout_ms={{ chunkserver_copyset_election_timeout_ms }} -# raft打快照间隔,一般是1800s,也就是30分钟 +# The snapshot interval for the raft is usually 1800s, which is 30 minutes copyset.snapshot_interval_s={{ chunkserver_copyset_snapshot_interval_s }} -# add一个节点,add的节点首先以类似learner的角色拷贝数据 -# 在跟leader差距catchup_margin个entry的时候,leader -# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定 -# 会commit&apply,catchup_margin较小可以大概率确保learner -# 后续很快可以加入复制组 +# Add a node. The node added by 'add' first copies data in a way similar to a learner. +# When the difference from the leader reaches 'catchup_margin' entries, +# the leader will attempt to commit the configuration-changing entry. +# Generally, the committed and applied entry will definitely be committed and applied. +# A smaller catchup_margin can significantly ensure that the learner can quickly join the replication group soon after. 
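As an illustration of the catch-up logic above (the number is hypothetical, not a shipped default): with catchup_margin set to 1000, a newly added peer copies data as a learner until it trails the leader by fewer than 1000 log entries, at which point the leader submits the configuration-change entry and the peer joins the replication group.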
copyset.catchup_margin={{ chunkserver_copyset_catchup_margin }} -# copyset chunk数据目录 +# Copyset chunk data directory copyset.chunk_data_uri={{ chunkserver_copyset_chunk_data_uri }} -# raft wal log目录 +# Raft wal log directory copyset.raft_log_uri={{ chunkserver_copyset_raft_log_uri }} -# raft元数据目录 +# Raft metadata directory copyset.raft_meta_uri={{ chunkserver_copyset_raft_meta_uri }} -# raft snapshot目录 +# Raft snapshot directory copyset.raft_snapshot_uri={{ chunkserver_copyset_raft_snapshot_uri }} -# copyset回收目录 +# Copyset recycle directory copyset.recycler_uri={{ chunkserver_copyset_recycler_uri }} -# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制 +# Concurrency threshold for loading copysets when the chunkserver starts; 0 means no limit copyset.load_concurrency={{ chunkserver_copyset_load_concurrency }} -# 检查copyset是否加载完成出现异常时的最大重试次数 +# Maximum number of retries when an exception occurs while checking whether copysets have finished loading copyset.check_retrytimes={{ chunkserver_copyset_check_retrytimes }} -# 当前peer的applied_index与leader上的committed_index差距小于该值 -# 则判定copyset已经加载完成 +# If the gap between the applied_index of the current peer and the committed_index on the leader is less than this value, +# the copyset is considered to have finished loading copyset.finishload_margin={{ chunkserver_copyset_finishload_margin }} -# 循环判定copyset是否加载完成的内部睡眠时间 +# Internal sleep time of the loop that checks whether copysets have finished loading copyset.check_loadmargin_interval_ms={{ chunkserver_copyset_check_loadmargin_interval_ms }} # scan copyset interval copyset.scan_interval_sec={{ chunkserver_copyset_scan_interval_sec }} @@ -115,26 +115,26 @@ copyset.check_syncing_interval_ms={{ chunkserver_copyset_check_syncing_interval_ # # Clone settings # -# 禁止使用curveclient +# Whether to disable the curve client clone.disable_curve_client={{ disable_snapshot_clone }} -# 禁止使用s3adapter +# Whether to disable the s3 adapter clone.disable_s3_adapter={{ disable_snapshot_clone }} -# 克隆的分片大小,一般1MB +# Clone slice size, usually 1MB clone.slice_size={{ chunkserver_clone_slice_size }} -# 读clone chunk时是否需要paste到本地 -# 该配置对recover chunk请求类型无效 +# Whether the data read from a clone chunk needs to be pasted to the local chunk +# This configuration has no effect on recover chunk requests clone.enable_paste={{ chunkserver_clone_enable_paste }} -# 克隆的线程数量 +# Number of clone threads clone.thread_num={{ chunkserver_clone_thread_num }} -# 克隆的队列深度 +# Queue depth for cloning clone.queue_depth={{ chunkserver_clone_queue_depth }} -# curve用户名 +# Curve username curve.root_username={{ curve_root_username }} -# curve密码 +# Curve password curve.root_password={{ curve_root_password }} -# client配置文件 +# Client configuration file curve.config_path={{ chunkserver_client_config_path }} -# s3配置文件 +# S3 configuration file s3.config_path={{ chunkserver_s3_config_path }} # Curve File time to live curve.curve_file_timeout_s={{ curve_file_timeout_s }} @@ -142,7 +142,7 @@ curve.curve_file_timeout_s={{ curve_file_timeout_s }} # # Local FileSystem settings # -# 是否开启使用renameat2,ext4内核3.15以后开始支持 +# Whether to enable renameat2; supported by the ext4 kernel since 3.15 fs.enable_renameat2={{ chunkserver_fs_enable_renameat2 }} # @@ -163,27 +163,27 @@ storeng.sync_write={{ chunkserver_storeng_sync_write }} # # Concurrent apply module # -# 并发模块的并发度,一般是10 +# Concurrency of the concurrent apply module, usually 10 wconcurrentapply.size={{ chunkserver_wconcurrentapply_size }} -# 并发模块线程的队列深度
+# Queue depth of concurrent module threads wconcurrentapply.queuedepth={{ chunkserver_wconcurrentapply_queuedepth }} -# 并发模块读线程的并发度,一般是5 +# The concurrency of concurrent module read threads is generally 5 rconcurrentapply.size={{ chunkserver_rconcurrentapply_size }} -# 并发模块读线程的队列深度 +# Queue depth of concurrent module read threads rconcurrentapply.queuedepth={{ chunkserver_rconcurrentapply_queuedepth }} # # Chunkfile pool # -# 是否开启从chunkfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from chunkfilepool, usually true chunkfilepool.enable_get_chunk_from_pool={{ chunkserver_format_disk }} -# chunkfilepool目录 +# chunkfilepool directory chunkfilepool.chunk_file_pool_dir={{ chunkserver_chunkfilepool_chunk_file_pool_dir }} -# chunkfilepool meta文件路径 +# chunkfilepool meta file path #chunkfilepool.meta_path=./chunkfilepool.meta -# chunkfilepool meta文件大小 +# chunkfilepool meta file size chunkfilepool.cpmeta_file_size={{ chunkserver_chunkfilepool_cpmeta_file_size }} -# chunkfilepool get chunk最大重试次数 +# chunkfilepool get chunk maximum retry count chunkfilepool.retry_times=5 # Enable clean chunk chunkfilepool.clean.enable={{ chunkserver_chunkfilepool_clean_enable }} @@ -195,34 +195,34 @@ chunkfilepool.clean.throttle_iops={{ chunkserver_chunkfilepool_clean_throttle_io # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool={{ walfilepool_use_chunk_file_pool }} -# 是否开启从walfilepool获取chunk,一般是true +# Whether to enable obtaining chunks from walfilepool, usually true walfilepool.enable_get_segment_from_pool={{ chunkserver_format_disk }} -# walpool目录 +# Walpool directory walfilepool.file_pool_dir={{ chunkserver_walfilepool_file_pool_dir }} -# walpool meta文件路径 +# Walpool Meta File Path walfilepool.meta_path={{ chunkserver_walfilepool_meta_path }} -# walpool meta文件大小 +# Walpool Meta File Size walfilepool.segment_size={{ chunkserver_walfilepool_segment_size }} -# WAL metapage大小 +# WAL metapage size walfilepool.metapage_size={{ chunkserver_walfilepool_metapage_size }} -# WAL filepool 元数据文件大小 +# WAL filepool metadata file size walfilepool.meta_file_size={{ chunkserver_walfilepool_meta_file_size }} -# WAL filepool get chunk最大重试次数 +# WAL filepool get chunk maximum retry count walfilepool.retry_times={{ chunkserver_walfilepool_retry_times }} # # trash settings # -# chunkserver回收数据彻底删除的过期时间 +# The expiration time for chunkserver to completely delete data for recycling trash.expire_afterSec={{ chunkserver_trash_expire_after_sec }} -# chunkserver检查回收数据过期时间的周期 +# Chunkserver checks the cycle of recycling data expiration time trash.scan_periodSec={{ chunkserver_trash_scan_period_sec }} # common option # -# chunkserver 日志存放文件夹 +# Chunkserver log storage folder chunkserver.common.logDir={{ chunkserver_common_log_dir }} -# 单元测试情况下 +# In the case of unit testing # chunkserver.common.logDir=./runlog/ diff --git a/curve-ansible/roles/generate_config/templates/client.conf.j2 b/curve-ansible/roles/generate_config/templates/client.conf.j2 index 08d4413780..492ac270bf 100644 --- a/curve-ansible/roles/generate_config/templates/client.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/client.conf.j2 @@ -1,8 +1,8 @@ # -################### mds一侧配置信息 ################## +################### MDS side configuration information################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters {% set mds_address=[] -%} {% for host in groups.mds -%} 
{% set mds_ip = hostvars[host].ansible_ssh_host -%} @@ -11,25 +11,25 @@ {% endfor -%} mds.listen.addr={{ mds_address | join(',') }} -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS={{ client_register_to_mds }} -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS={{ client_mds_rpc_timeout_ms }} -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS={{ client_mds_max_rpc_timeout_ms }} -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS={{ client_mds_max_retry_ms }} -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS={{ client_mds_max_failed_times_before_change_mds }} -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease={{ client_mds_refresh_times_per_lease }} -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS={{ client_mds_rpc_retry_interval_us }} # The normal retry times for trigger wait strategy @@ -42,104 +42,104 @@ mds.maxRetryMsInIOPath={{ client_mds_max_retry_ms_in_io_path }} mds.waitSleepMs={{ client_mds_wait_sleep_ms }} # -################# metacache配置信息 ################ +################# Metacache Configuration Information################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS={{ client_metacache_get_leader_timeout_ms }} -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry={{ client_metacache_get_leader_retry }} -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Obtaining the leader interface requires a period of sleep before each retry metacache.rpcRetryIntervalUS={{ client_metacache_rpc_retry_interval_us }} # -############### 调度层的配置信息 ############# +###############Configuration information of the scheduling layer############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.. schedule.queueCapacity={{ client_schedule_queue_capacity }} -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads in the queue +# What the executing thread needs to do is to retrieve the IO, then send it to the network and return to retrieve the next network task. 
A task starts from +# The RPC request is approximately (20us-100us) from the time the queue is retrieved to the time it is sent, and 20us is the normal time when it is not necessary to obtain a leader +# If a leader needs to be obtained during sending, the time will be around 100us, and the throughput of one thread will be between 10w-50w +# The performance has met the requirements schedule.threadpoolSize={{ client_schedule_threadpool_size }} -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity={{ client_isolation_task_queue_capacity }} -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# The size of the task queue thread pool for isolating QEMU threads, with a default value of 1 thread isolation.taskThreadPoolSize={{ client_isolation_task_thread_pool_size }} # -################ 与chunkserver通信相关配置 ############# +################Configuration related to communication with chunkserver############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS={{ client_chunkserver_op_retry_interval_us }} -# 失败的OP重试次数 +# Number of failed OP retries chunkserver.opMaxRetry={{ client_chunkserver_op_max_retry }} -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS={{ client_chunkserver_rpc_timeout_ms }} -# 开启基于appliedindex的读,用于性能优化 +# Enable reading based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead={{ client_chunkserver_enable_applied_index_read }} -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS={{ client_chunkserver_max_retry_sleep_interval_us }} -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS={{ client_chunkserver_max_rpc_timeout_ms }} -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes={{ client_chunkserver_max_stable_timeout_times }} -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs={{ client_chunkserver_check_health_timeout_ms }} -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# All chunkservers will be marked as unstable chunkserver.serverStableThreshold={{ client_chunkserver_server_stable_threshold }} -# 当底层chunkserver压力大时,可能也会触发unstable 
-# 由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under high pressure, unstable may also be triggered +# Due to copyset leader may change, the request timeout time will be set to the default value, resulting in IO hang +# In the case of real downtime, the request will be processed after a certain number of retries +# If you keep trying again, it's not a downtime situation, and at this point, the timeout still needs to enter the exponential backoff logic +# When the number of retries for a request exceeds this value, its timeout must enter exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff={{ client_chunkserver_min_retry_times_force_timeout_backoff }} -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC retry exceeds maxRetryTimesBeforeConsiderSuspend +# Record as suspended IO, metric will alarm chunkserver.maxRetryTimesBeforeConsiderSuspend={{ client_chunkserver_max_retry_times_before_consider_suspend }} # -################# 文件级别配置项 ############# +#################File level configuration items############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Libcurve allows for the maximum number of unreturned rpcs in the underlying rpc scheduling, with each file's inflight RPC being independent global.fileMaxInFlightRPCNum={{ client_file_max_inflight_rpc_num }} -# 文件IO下发到底层chunkserver最大的分片KB +# The maximum sharding KB for file IO distribution to the underlying chunkserver global.fileIOSplitMaxSizeKB={{ client_file_io_split_max_size_kb }} # -################# log相关配置 ############### +################# Log related configuration############### # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel={{ client_log_level }} -# 设置log的路径 +# Set the path of the log global.logPath={{ client_log_path }} -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # @@ -151,15 +151,15 @@ closefd.timeout={{ client_closefd_timeout_sec }} closefd.timeInterval={{ client_closefd_time_interval_sec }} # -############### metric 配置信息 ############# +############### Metric configuration information############# # global.metricDummyServerStartPort={{ client_metric_dummy_server_start_port }} -# 是否关闭健康检查: true/关闭 false/不关闭 +# Whether to turn off health check: true/turn off false/do not turn off global.turnOffHealthCheck={{ client_turn_off_health_check }} # -# session map文件,存储打开文件的filename到path的映射 +# Session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath={{ client_session_map_path }} diff --git a/curve-ansible/roles/generate_config/templates/mds.conf.j2 b/curve-ansible/roles/generate_config/templates/mds.conf.j2 index 13040fa9ea..7e9b8f39b1 100644 --- a/curve-ansible/roles/generate_config/templates/mds.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/mds.conf.j2 @@ -1,5 +1,5 @@ # -# mds服务端口 +# Mds service port # mds.listen.addr={{ ansible_ssh_host }}:{{ mds_port }} @@ -8,9 +8,9 @@ global.subnet={{ mds_subnet }} global.port={{ mds_port }} # -# etcd相关配置 +# ETCD related configurations # -# etcd地址 +# ETCD address {% set etcd_address=[] -%} {% for host in groups.etcd -%} {% set etcd_ip = hostvars[host].ansible_ssh_host -%} @@ -19,11 +19,11 @@ global.port={{ mds_port }} {% endfor -%} mds.etcd.endpoint={{ etcd_address | join(',') }} -# client建立连接的超时时间 +# The timeout period for establishing a connection with a client 
mds.etcd.dailtimeoutMs={{ mds_etcd_dailtimeout_ms }} -# client进行put/get/txn等操作的超时时间 +# The timeout period for client to perform put/get/txn and other operations mds.etcd.operation.timeoutMs={{ mds_etcd_operation_timeout_ms }} -# client操作失败可以重试的次数 +# The number of times a client operation failed and can be retried mds.etcd.retry.times={{ mds_etcd_retry_times }} # wait dlock timeout mds.etcd.dlock.timeoutMs={{ mds_etcd_dlock_timeout_ms }} @@ -31,68 +31,68 @@ mds.etcd.dlock.timeoutMs={{ mds_etcd_dlock_timeout_ms }} mds.etcd.dlock.ttlSec={{ mds_etcd_dlock_ttl_sec }} # -# segment分配量统计相关配置 +# Configuration related to segment allocation statistics # -# 将内存中的数据持久化到etcd的间隔, 单位ms +# The interval between persisting data in memory to ETCD, in milliseconds mds.segment.alloc.periodic.persistInterMs={{ mds_segment_alloc_periodic_persist_inter_ms }} -# 出错情况下的重试间隔,单位ms +# The retry interval in ms in case of an error mds.segment.alloc.retryInterMs={{ mds_segment_alloc_retry_inter_ms }} mds.segment.discard.scanIntevalMs={{ mds_segment_discard_scan_interval_ms }} -# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s) -# 该值和etcd集群election timeout相关. -# etcd的server端限制了该值最小为1.5 * election timeout -# 建议设置etcd集群election timeout为1s +# During the leader election, a session is created in seconds (the unit of the value for the interface of the go code is s) +# This value is related to the ETCD cluster selection timeout +# The server side of ETCD limits this value to a minimum of 1.5 * election timeout +# Suggest setting the ETCD cluster selection timeout to 1 second mds.leader.sessionInterSec={{ mds_leader_session_inter_sec }} -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误 +# The timeout period for the leader election. If it is 0 and the election is unsuccessful, it will continue to block. 
If it is greater than 0 and no leader is elected within electionTimeoutMs,
+# an error will be returned
mds.leader.electionTimeoutMs={{ mds_leader_election_timeout_ms }}
#
-# scheduler相关配置
+# Scheduler related configurations
#
-# copysetScheduler开关
+# copysetScheduler switch
mds.enable.copyset.scheduler={{ mds_enable_copyset_scheduler }}
-# leaderScheduler开关
+# leaderScheduler switch
mds.enable.leader.scheduler={{ mds_enable_leader_scheduler }}
-# recoverScheduler开关
+# recoverScheduler switch
mds.enable.recover.scheduler={{ mds_enable_recover_scheduler }}
-# replicaScheduler开关
+# replicaScheduler switch
mds.enable.replica.scheduler={{ mds_enable_replica_scheduler }}
# Scan scheduler switch
mds.enable.scan.scheduler={{ mds_enable_scan_scheduler }}
-# copysetScheduler 轮次间隔,单位是s
+# copysetScheduler round interval, in seconds
mds.copyset.scheduler.intervalSec={{ mds_copyset_scheduler_interval_sec }}
-# replicaScheduler 轮次间隔,单位是s
+# replicaScheduler round interval, in seconds
mds.replica.scheduler.intervalSec={{ mds_replica_scheduler_interval_sec }}
-# leaderScheduler 轮次间隔,单位是s
+# leaderScheduler round interval, in seconds
mds.leader.scheduler.intervalSec={{ mds_leader_scheduler_interval_sec }}
-# recoverScheduler 轮次间隔,单位是s
+# recoverScheduler round interval, in seconds
mds.recover.scheduler.intervalSec={{ mds_recover_scheduler_interval_sec }}
# Scan scheduler run interval (seconds)
mds.scan.scheduler.intervalSec={{ mds_scan_scheduler_interval_sec }}
-# 每块磁盘上operator的并发度
+# Concurrency of operators on each disk
mds.schduler.operator.concurrent={{ mds_schduler_operator_concurrent }}
-# leader变更超时时间, 超时后mds从内存移除该operator
+# Timeout for a leader-transfer operator; after the timeout, mds removes the operator from memory
mds.schduler.transfer.limitSec={{ mds_schduler_transfer_limit_sec }}
-# 减一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for a remove-replica operator; after the timeout, mds removes the operator from memory
mds.scheduler.remove.limitSec={{ mds_scheduler_remove_limit_sec }}
-# 增加一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for an add-replica operator; after the timeout, mds removes the operator from memory
mds.scheduler.add.limitSec={{ mds_scheduler_add_limit_sec }}
-# change一个副本超时时间, 超时后mds从内存移除该operator
+# Timeout for a change-replica operator; after the timeout, mds removes the operator from memory
mds.scheduler.change.limitSec={{ mds_scheduler_change_limit_sec }}
# Scan operator timeout (seconds)
mds.scheduler.scan.limitSec={{ mds_scheduler_scan_limit_sec }}
-# copyset数量极差不能超过均值的百分比
+# The range (max - min) of copyset counts must not exceed this percentage of the mean
mds.scheduler.copysetNumRangePercent={{ mds_scheduler_copyset_mum_range_percent }}
-# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比
+# The scatter-width range of copysets across chunkservers must not exceed this percentage of the minimum value
mds.schduler.scatterWidthRangePerent={{ mds_schduler_scatterwidth_range_percent }}
-# 一个server上超过一定数量的chunkserver offline, 不做恢复
+# If more than this number of chunkservers are offline on one server, no recovery is performed
mds.chunkserver.failure.tolerance={{ mds_chunkserver_failure_tolerance }}
-# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s
-# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关
+# A chunkserver can be selected as a target leader only after it has been up for coolingTimeSec_, in seconds
+# TODO(lixiaocui): this should be made correlated to some extent with the snapshot time interval
mds.scheduler.chunkserver.cooling.timeSec={{ mds_scheduler_chunkserver_cooling_time_sec }}
# ScanScheduler: scan start hour in one day ([0-23])
mds.scheduler.scan.startHour={{ mds_scheduler_scan_start_hour }}
@@ -106,129 +106,129 @@ mds.scheduler.scan.concurrent.per.pool={{ mds_scheduler_scan_concurrent_per_pool
mds.scheduler.scan.concurrent.per.chunkserver={{ mds_scheduler_scan_concurrent_per_chunkserver }}
#
-# 心跳相关配置,单位为ms
+# Heartbeat related configuration, in ms
#
-# chunkserver和mds的心跳间隔
+# Heartbeat interval between chunkserver and mds
mds.heartbeat.intervalMs={{ mds_heartbeat_interval_ms }}
-# chunkserver和mds间心跳miss的时间
+# Time after which a heartbeat between chunkserver and mds is considered missed
mds.heartbeat.misstimeoutMs={{ mds_heartbeat_misstimeout_ms }}
-# mds在心跳miss后offlinetimeout被标记为offline
+# After heartbeat misses, mds marks the chunkserver offline once offlinetimeout elapses
mds.heartbeat.offlinetimeoutMs={{ mds_heartbeat_offlinet_imeout_ms }}
-# mds启动后延迟一定时间开始指导chunkserver删除物理数据
-# 需要延迟删除的原因在代码中备注
+# After mds starts, it delays for a period of time before instructing chunkservers to delete physical data
+# The reason for the delayed deletion is noted in the code
mds.heartbeat.clean_follower_afterMs={{ mds_heartbeat_clean_follower_after_ms }}
#
-# namespace cache相关
+# Namespace cache related
#
-# namestorage的缓存大小,为0表示不缓存
-# 按照每个文件最小10GB的空间预算。算上超售(2倍)
-# 文件数量 = 5PB/10GB ~= 524288 个文件
-# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间
-# 16MB chunk大小, 1个segment 1GB
-# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB
-# 数据量:3GB左右
-# 记录数量:524288+2621440 ~= 300w左右
+# The cache size of namestorage, where 0 means no caching
+# Based on a minimum space budget of 10GB per file, including overselling (2x)
+# Number of files = 5PB/10GB ~= 524288 files
+# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB of space
+# 16MB chunk size, 1GB per segment
+# sizeof(segment object) * 2621440 ~= (32 + (1024/16) * 12) * 2621440 ~= 1.95 GB
+# Data volume: about 3GB
+# Number of records: 524288 + 2621440 ~= about 3 million
mds.cache.count={{ mds_cache_count }}
#
# mds file record settings
#
-# mds file记录过期时间,单位us
+# Expiration time of mds file records, in us
mds.file.expiredTimeUs={{ file_expired_time_us }}
-# mds后台扫描线程扫描file记录间隔时间,单位us
+# Interval at which the mds background scanning thread scans file records, in us
mds.file.scanIntevalTimeUs={{ mds_file_scan_inteval_time_us }}
#
# auth settings
#
-# root用户密码
+# Root user password
mds.auth.rootUserName={{ curve_root_username }}
mds.auth.rootPassword={{ curve_root_password }}
#
# file lock setting
#
-# mds的文件锁桶大小
+# File lock bucket size for mds
mds.filelock.bucketNum={{ mds_filelock_bucket_num }}
#
# topology config
#
-# Toplogy 定期刷新入数据库的时间间隔
+# Interval at which Topology is periodically flushed to the database
mds.topology.TopologyUpdateToRepoSec={{ mds_topology_topology_update_to_repo_sec }}
-# 请求chunkserver上创建全部copyset的超时时间
+# Timeout for requesting creation of all copysets on a chunkserver
mds.topology.CreateCopysetRpcTimeoutMs={{ mds_topology_create_copyset_rpc_timeout_ms }}
-# 请求chunkserver上创建copyset重试次数
+# Retry count for requesting copyset creation on a chunkserver
mds.topology.CreateCopysetRpcRetryTimes={{ mds_topology_create_copyset_rpc_retry_times }}
-# 请求chunkserver上创建copyset重试间隔
+# Retry interval for requesting copyset creation on a chunkserver
mds.topology.CreateCopysetRpcRetrySleepTimeMs={{ mds_topology_create_copyset_rpc_retry_sleep_time_ms }}
-# Topology模块刷新metric时间间隔
+# Interval at which the Topology module refreshes metrics
mds.topology.UpdateMetricIntervalSec={{ mds_topology_update_metric_interval_sec }}
-# 
物理池使用百分比,即使用量超过这个值即不再往这个池分配 +# The percentage of physical pool usage, even if the usage exceeds this value, it will no longer be allocated to this pool mds.topology.PoolUsagePercentLimit={{ mds_topology_pool_usage_percent_limit }} -# 多pool选pool策略 0:Random, 1:Weight +# Multi pool selection pool strategy 0: Random, 1: Weight mds.topology.choosePoolPolicy={{ mds_topology_choose_pool_policy }} # enable LogicalPool ALLOW/DENY status mds.topology.enableLogicalPoolStatus={{ mds_topology_enable_logicalpool_status}} # # copyset config -# 默认值,为0时不启用 +# Default value, not enabled when 0 # -# 生成copyset重试次数 +# Generate copyset retry count mds.copyset.copysetRetryTimes={{ mds_copyset_copyset_retry_times }} -# 所有chunkserver的scatterWidth需满足的最大方差 +# The maximum variance that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthVariance={{ mds_copyset_scatterwidth_variance }} -# 所有chunkserver的scatterWidth需满足的最大标准差 +# The maximum standard deviation that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthStandardDevation={{ mds_copyset_scatterwidth_standard_devation }} -# 所有chunkserver的scatterWidth需满足的最大极差 +# The maximum range that the scatterWidth of all chunkservers needs to meet mds.copyset.scatterWidthRange={{ mds_copyset_scatterwidth_range }} -# 所有chunkserver的scatterWidth偏离均值的百分比 -# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复 -# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的 -# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点 -# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高, -# 导致算法可能算不出理想结果,建议设置值为20 +# The percentage of deviation from the mean scatterWidth of all chunk servers. Setting a too large percentage of scatterWidth deviation can result in some machines having +# excessively small scatterWidth, affecting machine recovery times. Prolonged recovery times can reduce the cluster's reliability. Additionally, it can lead to some machines having +# excessively large scatterWidth, causing certain chunk servers to scatter copysets across various machines. When other machines write data, these machines with larger scatterWidth +# can become hotspots. Setting a too small percentage of scatterWidth deviation requires a greater average scatterWidth, +# which demands higher copyset algorithm requirements. This can lead to the algorithm being unable +# to produce ideal results. It is recommended to set the value to 20. 
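To make the scatterWidthFloatingPercentage comment above concrete, here is a small worked example in Python. It is illustrative only (not part of the patch); the average scatter-width of 100 is an assumed number, and 20 is the value the comment recommends.

def scatter_width_band(average, floating_percentage):
    # Allowed deviation band around the average scatter-width.
    delta = average * floating_percentage / 100.0
    return (average - delta, average + delta)

# With an assumed average of 100 and the recommended value of 20,
# each chunkserver's scatter-width is expected to stay within [80, 120].
print(scatter_width_band(100, 20))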
mds.copyset.scatterWidthFloatingPercentage={{ mds_copyset_scatterwidth_floating_percentage }} # # curvefs config # -# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216 +# The default chunk size for curvefs is 16MB = 16*1024*1024 = 16777216 mds.curvefs.defaultChunkSize={{ chunk_size }} -# curvefs的默认segment size大小,1GB = 1*1024*1024*1024 = 1073741824 +# The default segment size for curves is 1GB = 1*1024*1024*1024 = 1073741824 mds.curvefs.defaultSegmentSize={{ segment_size }} -# curvefs的默认最小文件大小,10GB = 10*1024*1024*1024 = 10737418240 +# The default minimum file size for curvefs is 10GB = 10*1024*1024*1024 = 10737418240 mds.curvefs.minFileLength={{ min_file_length }} -# curvefs的默认最大文件大小,20TB = 20*1024*1024*1024*1024 = 21990232555520 +# The default maximum file size for curvefs is 20TB = 20*1024*1024*1024*1024 = 21990232555520 mds.curvefs.maxFileLength={{ max_file_length }} # # chunkseverclient config # -# rpc 超时时间 +# RPC timeout mds.chunkserverclient.rpcTimeoutMs={{ mds_chunkserverclient_rpc_timeout_ms }} -# rpc 重试次数 +# RPC retry count mds.chunkserverclient.rpcRetryTimes={{ mds_chunkserverclient_rpc_retry_times }} -# rpc 重试时间间隔 +# RPC retry interval mds.chunkserverclient.rpcRetryIntervalMs={{ mds_chunkserverclient_rpc_retry_interval_ms }} -# 从copyset的每个chunkserver getleader的重试的最大轮次 +# The maximum number of retries from each chunkserver getleader in the copyset mds.chunkserverclient.updateLeaderRetryTimes={{ mds_chunkserverclient_update_leader_retry_times }} -# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间 +# The interval between each round of each chunkserver getleader in the copyset must be greater than the time for selecting the master in the raft mds.chunkserverclient.updateLeaderRetryIntervalMs={{ mds_chunkserverclient_update_leader_retry_interval_ms }} # snapshotclone config # -# snapshot clone server 地址 +# snapshot clone server address mds.snapshotcloneclient.addr={{ snapshot_nginx_vip }}:{{ nginx_docker_external_port }} # # common options # -# 日志存放文件夹 +# Log storage folder mds.common.logDir={{ mds_common_log_dir }} -# 单元测试情况下 +# In the case of unit testing # mds.common.logDir=./runlog/ # diff --git a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2 index d7121c6dad..eadcb92bd7 100644 --- a/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/nebd-client.conf.j2 @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress={{ nebd_data_dir }}/nebd.sock -# 文件锁路径 +# File lock path metacache.fileLockPath={{ nebd_data_dir }}/lock -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes={{ nebd_client_sync_rpc_retry_times }} -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs={{ nebd_client_rpc_retry_inverval_us }} -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs={{ nebd_client_rpc_retry_max_inverval_us }} -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs={{ nebd_client_rpc_hostdown_retry_inverval_us }} -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS={{ nebd_client_health_check_internal_s }} -# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs={{ nebd_client_delay_health_check_internal_ms }} -# rpc发送执行队列个数 +# Number 
of RPC send execution queues request.rpcSendExecQueueNum={{ nebd_client_rpc_send_exec_queue_num }} -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS={{ nebd_client_heartbeat_inverval_s }} -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs={{ nebd_client_heartbeat_rpc_timeout_ms }} -# 日志路径 +# Log Path log.path={{ nebd_log_dir }}/client diff --git a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 index 5262d0af37..7cd700b2db 100644 --- a/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/nebd-server.conf.j2 @@ -1,16 +1,16 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath={{ client_config_path }} -#brpc server监听端口 +# brpc server listening port listen.address={{ nebd_data_dir }}/nebd.sock -#元数据文件地址,包含文件名 +# Metadata file address, including file name meta.file.path={{ nebd_data_dir }}/nebdserver.meta -#心跳超时时间 +# Heartbeat timeout heartbeat.timeout.sec={{ nebd_server_heartbeat_timeout_s }} -#文件超时检测时间间隔 +# File timeout detection interval heartbeat.check.interval.ms={{ nebd_server_heartbeat_check_interval_ms }} # return rpc when io error diff --git a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 index ca52b19925..00c20160a0 100644 --- a/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/snapshot_clone_server.conf.j2 @@ -1,18 +1,18 @@ # # curvefs client options # -# client配置文件位置 +# Client configuration file location client.config_path={{ snap_client_config_path }} -# mds root 用户名 +# Mds root username mds.rootUser={{ curve_root_username }} -# mds root 密码 +# Mds root password mds.rootPassword={{ curve_root_password }} -# 调用client方法的重试总时间 +# The total retry time for calling the client method client.methodRetryTimeSec={{ snap_client_method_retry_time_sec }} -# 调用client方法重试间隔时间 +# Call client method retry interval client.methodRetryIntervalMs={{ snap_client_method_retry_interval_ms }} -# 日志文件位置 +# Log file location log.dir={{ snap_log_dir }} # @@ -26,53 +26,53 @@ s3.config_path={{ snap_s3_config_path }} server.address={{ ansible_ssh_host }}:{{ snapshot_port }} server.subnet={{ snapshot_subnet }} server.port={{ snapshot_port }} -# 调用client异步方法重试总时间 +# Total retry time for calling client asynchronous methods server.clientAsyncMethodRetryTimeSec={{ snap_client_async_method_retry_time_sec }} -# 调用client异步方法重试时间间隔 +# Call client asynchronous method retry interval server.clientAsyncMethodRetryIntervalMs={{ snap_client_async_method_retry_interval_ms }} -# 快照工作线程数 +# Number of snapshot worker threads server.snapshotPoolThreadNum={{ snap_snapshot_pool_thread_num }} -# 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) +# Scanning cycle of snapshot background thread scanning waiting queue and work queue (unit: ms) server.snapshotTaskManagerScanIntervalMs={{ snap_snapshot_task_manager_scan_interval_ms }} -# 转储chunk分片大小 +# Dump chunk shard size server.chunkSplitSize={{ snap_chunk_split_size }} -# CheckSnapShotStatus调用间隔 +# CheckSnapShotStatus call interval server.checkSnapshotStatusIntervalMs={{ snap_check_snapshot_status_interval_ms }} -# 最大快照数 +# Maximum Snapshots server.maxSnapshotLimit={{ snap_max_snapshot_limit }} -# 同时执行转储的线程数 +# Number of threads simultaneously executing dump server.snapshotCoreThreadNum={{ 
snap_snapshot_core_thread_num }}
-# mds session 时间
+# MDS session time, in us
server.mdsSessionTimeUs={{ file_expired_time_us }}
-# 每个线程同时进行ReadChunkSnapshot和转储的快照分片数量
+# Number of snapshot shards on which each thread performs ReadChunkSnapshot and dumping concurrently
server.readChunkSnapshotConcurrency={{ snap_read_chunk_snapshot_concurrency }}
# for clone
-# 用于Lazy克隆元数据部分的线程池线程数
+# Number of threads in the pool used for the metadata stage of lazy clone
server.stage1PoolThreadNum={{ snap_stage1_pool_thread_num }}
-# 用于Lazy克隆数据部分的线程池线程数
+# Number of threads in the pool used for the data stage of lazy clone
server.stage2PoolThreadNum={{ snap_stage2_pool_thread_num }}
-# 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数
+# Number of threads in the pool used for other control-plane requests such as non-lazy clone and clone deletion
server.commonPoolThreadNum={{ snap_common_pool_thread_num }}
-# CloneTaskManager 后台线程扫描间隔
+# CloneTaskManager background thread scan interval
server.cloneTaskManagerScanIntervalMs={{ snap_clone_task_manager_scan_interval_ms }}
-# clone chunk分片大小
+# Clone chunk shard size
server.cloneChunkSplitSize={{ snap_clone_chunk_split_size }}
-# 克隆临时目录
+# Clone temporary directory
server.cloneTempDir={{ snap_clone_temp_dir }}
-# CreateCloneChunk同时进行的异步请求数量
+# Number of asynchronous CreateCloneChunk requests issued concurrently
server.createCloneChunkConcurrency={{ snap_create_clone_chunk_concurrency }}
-# RecoverChunk同时进行的异步请求数量
+# Number of asynchronous RecoverChunk requests issued concurrently
server.recoverChunkConcurrency={{ snap_recover_chunk_concurrency }}
-# CloneServiceManager引用计数后台扫描每条记录间隔
+# Interval at which the CloneServiceManager reference-count background scan processes each record
server.backEndReferenceRecordScanIntervalMs={{ snap_clone_backend_ref_record_scan_interval_ms }}
-# CloneServiceManager引用计数后台扫描每轮记录间隔
+# Interval between rounds of the CloneServiceManager reference-count background scan
server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_interval_ms }}
#
-# etcd相关配置
+# ETCD related configurations
#
-# etcd地址
+# ETCD address
{% set etcd_address=[] -%}
{% for host in groups.etcd -%}
{% set etcd_ip = hostvars[host].ansible_ssh_host -%}
@@ -81,11 +81,11 @@ server.backEndReferenceFuncScanIntervalMs={{ snap_clone_backend_ref_func_scan_in
{% endfor -%}
etcd.endpoint={{ etcd_address | join(',') }}
-# client建立连接的超时时间
+# Timeout for the etcd client to establish a connection
etcd.dailtimeoutMs={{ snap_etcd_dailtimeout_ms }}
-# client进行put/get/txn等操作的超时时间
+# Timeout for client put/get/txn and other operations
etcd.operation.timeoutMs={{ snap_etcd_operation_timeout_ms }}
-# client操作失败可以重试的次数
+# Number of times a failed client operation can be retried
etcd.retry.times={{ snap_etcd_retry_times }}
# wait dlock timeout
etcd.dlock.timeoutMs={{ snap_etcd_dlock_timeout_ms }}
@@ -93,20 +93,20 @@ etcd.dlock.ttlSec={{ snap_etcd_dlock_ttl_sec }}
#
-# leader选举相关参数
+# Leader election related parameters
#
-# leader lock名称
+# Leader lock name
leader.campagin.prefix={{ snap_leader_campagin_prefix }}
-# leader竞选时会创建session, 单位是秒(go端代码的接口这个值的单位就是s)
-# 该值和etcd集群election timeout相关.
-# etcd的server端限制了该值最小为1.5 * election timeout -# 建议设置etcd集群election timeout为1s +# During the leader election, a session is created in seconds (the unit of the value for the interface of the go code is s) +# This value is related to the ETCD cluster selection timeout +# The server side of ETCD limits this value to a minimum of 1.5 * election timeout +# Suggest setting the ETCD cluster selection timeout to 1 second leader.session.intersec={{ snap_leader_session_inter_sec }} -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误 +# The timeout period for the leader election. If it is 0 and the election is unsuccessful, it will continue to block. If it is greater than 0, it will be in the selectionTimeoutMs time +# If a leader is not selected, an error will be returned leader.election.timeoutms={{ snap_leader_election_timeout_ms }} # -# dummyserver相关配置 +# Dummyserver related configurations # server.dummy.listen.port={{ snapshot_dummy_port }} diff --git a/curve-ansible/roles/generate_config/templates/tools.conf.j2 b/curve-ansible/roles/generate_config/templates/tools.conf.j2 index 6207e8a4ef..b630b3dfe3 100644 --- a/curve-ansible/roles/generate_config/templates/tools.conf.j2 +++ b/curve-ansible/roles/generate_config/templates/tools.conf.j2 @@ -1,4 +1,4 @@ -# mds地址 +# Mds address {% set mds_address=[] -%} {% for host in groups.mds -%} {% set mds_ip = hostvars[host].ansible_ssh_host -%} @@ -8,13 +8,13 @@ mdsAddr={{ mds_address | join(',') }} # mds dummy port mdsDummyPort={{ hostvars[groups.mds[0]].mds_dummy_port }} -# 发送rpc的超时时间 +# Time out for sending rpc rpcTimeout={{ tool_rpc_timeout }} -# rpc重试次数 +# RPC retry count rpcRetryTimes={{ tool_rpc_retry_times }} # the rpc concurrency to chunkserver rpcConcurrentNum={{ tool_rpc_concurrent_num }} -# etcd地址 +# ETCD address {% set etcd_address=[] -%} {% for host in groups.etcd -%} {% set etcd_ip = hostvars[host].ansible_ssh_host -%} @@ -23,7 +23,7 @@ rpcConcurrentNum={{ tool_rpc_concurrent_num }} {% endfor -%} etcdAddr={{ etcd_address | join(',') }} {% if groups.snapshotclone is defined and groups.snapshotclone[0] is defined %} -# snapshot clone server 地址 +# Snapshot clone server address {% set snap_address=[] -%} {% for host in groups.snapshotclone -%} {% set snap_ip = hostvars[host].ansible_ssh_host -%} diff --git a/curve-ansible/roles/install_package/files/disk_uuid_repair.py b/curve-ansible/roles/install_package/files/disk_uuid_repair.py index eb48728e2e..77df4b83d9 100644 --- a/curve-ansible/roles/install_package/files/disk_uuid_repair.py +++ b/curve-ansible/roles/install_package/files/disk_uuid_repair.py @@ -17,8 +17,8 @@ # limitations under the License. 
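The tools.conf.j2 template above builds its endpoint lists with the same Jinja2 pattern used throughout these templates: collect ip:port strings in a list, then join them with commas. A rough Python equivalent is sketched below; it is illustrative only, and the host names, IPs, and port are made-up examples rather than values from this repository.

# Assumed example inventory; in the template the IPs come from
# hostvars[host].ansible_ssh_host and the port from the group variables.
etcd_hosts = {"etcd1": "10.0.0.1", "etcd2": "10.0.0.2"}
etcd_port = 2379

etcd_address = ["%s:%s" % (ip, etcd_port) for ip in etcd_hosts.values()]
print("etcdAddr=" + ",".join(etcd_address))
# -> etcdAddr=10.0.0.1:2379,10.0.0.2:2379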
# -# 检测磁盘上disk.meta中记录的uuid与当前磁盘的实际uuid是否相符合 -# 如果不符合, 更新为当前的uuid +# Check if the uuid recorded in disk.meta on the disk matches the actual uuid of the current disk +# If not, update to the current uuid import os import hashlib @@ -26,12 +26,12 @@ import subprocess def __get_umount_disk_list(): - # 获取需要挂载的设备 + # Obtain devices that need to be mounted cmd = "lsblk -O|grep ATA|awk '{print $1}'" out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) devlist = out_msg.splitlines() - # 查看当前设备的挂载状况 + # View the mounting status of the current device umount = [] for dev in devlist: cmd = "lsblk|grep " + dev + "|awk '{print $7}'" @@ -64,7 +64,7 @@ def __analyse_uuid(kv): return "" else: uuidmd5 = uuidmd5kv[1].replace("\n", "") - # 校验 + # Verification if (hashlib.md5(uuid).hexdigest() != uuidmd5): print("uuid[%s] not match uuidmd5[%s]" % (uuid, uuidmd5)) return "" @@ -72,14 +72,14 @@ def __analyse_uuid(kv): def __get_recorduuid(disk): uuid = "" - # 将磁盘挂载到临时目录 + # Mount the disk to a temporary directory cmd = "mkdir -p /data/tmp; mount " + disk + " /data/tmp" retCode = subprocess.call(cmd, shell=True) if retCode != 0: print("Get record uuid in %s fail." % disk) return False, uuid - # 挂载成功,获取记录的uuid + # Successfully mounted, obtaining the recorded uuid try: cmd = "cat /data/tmp/disk.meta" out_msg = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) @@ -94,7 +94,7 @@ def __get_recorduuid(disk): except subprocess.CalledProcessError as e: print("Get file disk.meta from %s fail, reason: %s." % (disk, e)) - # 卸载磁盘 + # Unmount Disk cmd = "umount " + disk + "; rm -fr /data/tmp" retCode = subprocess.call(cmd, shell=True) if retCode != 0: @@ -117,16 +117,16 @@ def __cmp_recorduuid_with_actual(umountDisk): recordList = {} actualList = {} for disk in umountDisk: - # 获取当前disk上记录的uuid + # Obtain the uuid recorded on the current disk diskFullName = "/dev/" + disk opRes, recorduuid = __get_recorduuid(diskFullName) if opRes != True or len(recorduuid) == 0: return False, recordList, actualList - # 获取disk的实际uuid + # Obtain the actual uuid of the disk actualuuid = __get_actualuuid(disk).replace("\n", "") - # 比较记录的和实际的是否相同 + # Compare whether the recorded and actual values are the same if actualuuid != recorduuid: recordList[disk] = recorduuid actualList[disk] = actualuuid @@ -137,7 +137,7 @@ def __cmp_recorduuid_with_actual(umountDisk): def __mount_with_atual_uuid(diskPath, record, actual): print("%s uuid change from [%s] to [%s]." % (diskPath, record, actual)) - # 从/etc/fstab中获取对应的挂载目录 + # Obtain the corresponding mount directory from/etc/fstab mntdir = "" try: cmd = "grep " + record + " /etc/fstab | awk -F \" \" '{print $2}'" @@ -146,7 +146,7 @@ def __mount_with_atual_uuid(diskPath, record, actual): print("Get mount dir for %s fail. error: %s." % (diskPath, e)) return False - # 将actual挂载到相应的目录下 + # Mount the actual to the corresponding directory cmd = "mount " + diskPath + " " + mntdir retCode = subprocess.call(cmd, shell=True) if retCode !=0: @@ -155,7 +155,7 @@ def __mount_with_atual_uuid(diskPath, record, actual): print("mount %s to %s success." % (diskPath, mntdir)) replaceCmd = "sed -i \"s/" + record + "/" + actual + "/g\"" - # 将新的uuid写入到fstab + #Write the new uuid to fstab cmd = "cp /etc/fstab /etc/fstab.bak;" + replaceCmd + " /etc/fstab > /dev/null" retCode = subprocess.call(cmd, shell=True) if retCode !=0: @@ -163,7 +163,7 @@ def __mount_with_atual_uuid(diskPath, record, actual): return False print("modify actual uuid to /etc/fstab for disk %s success." 
% diskPath) - # 将新的uuid写入到diskmeta + #Write the new uuid to diskmeta fileFullName = mntdir + "/disk.meta" filebakName = fileFullName + ".bak" cpcmd = "cp " + fileFullName + " " + filebakName @@ -184,7 +184,7 @@ def __handle_inconsistent(umountDisk, record, actual): if disk not in record: print("record uuid and actual uuid of %s is same, please check other reason" % disk) continue - # 按照actual uuid做挂载 + # Mount according to the actual uuid res = __mount_with_atual_uuid("/dev/" + disk, record[disk], actual[disk]) if res: continue @@ -193,18 +193,18 @@ def __handle_inconsistent(umountDisk, record, actual): return True if __name__ == "__main__": - # 查看未挂载成功的磁盘设备列表 + # View the list of disk devices that were not successfully mounted umountDisk = __get_umount_disk_list() if len(umountDisk) == 0: print("All disk mount success.") exit(0) - # 查看是否之前已经挂载过 + # Check if it has been previously mounted if __uninit(): print("Please init env with chunkserver_ctl.sh first.") exit(0) - # 查看当前未挂载成功的磁盘设备记录的uuid和实际uuid + # View the uuid and actual uuid of disk devices that have not been successfully mounted currently cmpRes, record, actual = __cmp_recorduuid_with_actual(umountDisk) if cmpRes == False: print("Compare record uuid with actual uuid fail.") @@ -213,7 +213,7 @@ def __handle_inconsistent(umountDisk, record, actual): print("Record uuid with actual uuid all consistent.") exit(0) - # 将不一致的磁盘按照当前的uuid重新挂载 + #Remount inconsistent disks according to the current uuid if __handle_inconsistent(umountDisk, record, actual): print("fix uuid-changed disk[%s] success." % umountDisk) exit(0) diff --git a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 index cba41adfcd..d44a03c682 100644 --- a/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 +++ b/curve-ansible/roles/install_package/templates/chunkserver_ctl.sh.j2 @@ -6,7 +6,7 @@ dataDir={{ chunkserver_data_dir }} raftLogProtocol={{ chunkserver_raft_log_procotol }} source ./chunkserver_deploy.sh -# 使用方式 +# Usage function help() { echo "COMMANDS:" echo " start : start chunkserver" @@ -50,18 +50,18 @@ function ip_value() { }' } -# 从subnet获取ip +# Obtain IP from subnet function get_ip_from_subnet() { subnet=$1 prefix=$(ip_value $subnet) mod=`echo $subnet|awk -F/ '{print $2}'` mask=$((2**32-2**(32-$mod))) - # 对prefix再取一次模,为了支持10.182.26.50/22这种格式 + # Take the module again for the prefix to support the format 10.182.26.50/22 prefix=$(($prefix&$mask)) ip= for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"` do - # 把ip转换成整数 + # Convert IP to an integer ip_int=$(ip_value $i) if [ $(($ip_int&$mask)) -eq $prefix ] then @@ -76,7 +76,7 @@ function get_ip_from_subnet() { fi } -# 启动chunkserver +# Start chunkserver function start() { if [ $# -lt 1 ] then @@ -87,7 +87,7 @@ function start() { then confPath=$3 fi - # 检查配置文件 + # Check configuration file if [ ! -f ${confPath} ] then echo "confPath $confPath not exist!" 
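The get_ip_from_subnet helper above picks the local address whose masked value matches the configured subnet, re-masking the prefix so that forms like 10.182.26.50/22 also work. A small Python sketch of the same logic follows; it is illustrative only (the shell script does this with awk and arithmetic expansion), and the example addresses are assumptions.

def ip_to_int(ip):
    # Convert a dotted-quad IP to a 32-bit integer, like the ip_value helper.
    a, b, c, d = (int(x) for x in ip.split("."))
    return (a << 24) | (b << 16) | (c << 8) | d

def in_subnet(ip, subnet):
    prefix, mod = subnet.split("/")
    mask = 2 ** 32 - 2 ** (32 - int(mod))
    # Mask the prefix again so 10.182.26.50/22 behaves like 10.182.24.0/22.
    network = ip_to_int(prefix) & mask
    return (ip_to_int(ip) & mask) == network

print(in_subnet("10.182.25.7", "10.182.26.50/22"))  # True for this assumed example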
@@ -104,7 +104,7 @@ function start() {
get_ip_from_subnet $external_subnet
external_ip=$ip
enableExternalServer=true
- # external ip和internal ip一致或external ip为127.0.0.1时不启动external server
+ # Do not start the external server when the external IP and internal IP are the same or when the external IP is 127.0.0.1
if [ $internal_ip = $external_ip -o $external_ip = "127.0.0.1" ]
then
enableExternalServer=false
@@ -148,7 +148,7 @@ function start_one() {
fi
jemallocpath={{ jemalloc_path }}
- # 检查jemalloc库文件
+ # Check the jemalloc library file
if [ ! -f ${jemallocpath} ]
then
echo "Not found jemalloc library, Path is ${jemallocpath}"
@@ -230,7 +230,7 @@ function restart() {
}
function wait_stop() {
- # wait 3秒钟让它退出
+ # Wait 3 seconds for it to exit
retry_times=0
while [ $retry_times -le 3 ]
do
@@ -244,7 +244,7 @@ function wait_stop() {
break
fi
done
- # 如果进程还在,就kill -9
+ # If the process is still running, kill -9 it
ps -efl|grep -E "curve-chunkserver .*${dataDir}/chunkserver$1 "|grep -v grep > /dev/null 2>&1
if [ $? -eq 0 ]
then
@@ -325,12 +325,12 @@ function deploy() {
}
function format() {
- # 格式化chunkfile pool
+ # Format chunkfile pool
curve-format $*
}
function recordmeta() {
- # 将当前的磁盘的uuid及其md5备份到磁盘的disk.meta文件中
+ # Back up the current disk's uuid and its md5 to the disk's disk.meta file
meta_record;
}
diff --git a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2 b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
index db8566728a..7f84ccd28f 100644
--- a/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
+++ b/curve-ansible/roles/install_package/templates/chunkserver_deploy.sh.j2
@@ -1,5 +1,5 @@
#!/bin/bash
-#confirm提示,防止误操作
+# confirm prompt to prevent misoperation
dataDir={{ chunkserver_data_dir }}
function do_confirm {
echo "This deployment script will format the disk and delete all the data."
@@ -24,14 +24,14 @@ diskList="{{ dlist | join('\n') }}"
{% endif %}
function deploy_prep {
-#清理/etc/fstab残留信息
+# Clean up residual /etc/fstab entries
grep curvefs /etc/fstab
if [ $? -eq 0 ]
then
sed -i '/curvefs/d' /etc/fstab
sed -i '/chunkserver/d' /etc/fstab
fi
-#将数据盘挂载的目录都卸载掉,为下一步格式化磁盘做准备
+# Unmount all directories mounted from the data disks, in preparation for formatting the disks in the next step
for i in `{{ get_disk_list_cmd }}`
do
mntdir=`lsblk|grep $i|awk '{print $7}'`
@@ -49,7 +49,7 @@ function deploy_prep {
fi
done
}
-#记录磁盘的盘符信息和磁盘的wwn信息,将信息持久化到diskinfo文件
+# Record each disk's device name and wwn information, and persist it to the diskinfo file
declare -A disk_map
diskinfo=./diskinfo
function record_diskinfo {
@@ -69,7 +69,7 @@ function record_diskinfo {
done
}
-#根据磁盘数量创建数据目录和日志目录,目前的数据目录格式统一是$dataDir/chunkserver+num,日志目录在$dataDir/log/chunkserver+num
+# Create a data directory and log directory based on the number of disks.
The current data directory format is $dataDir/chunkserver+num, and the log directory is in $dataDir/log/chunkserver+num function chunk_dir_prep { if [ -d ${dataDir} ] then @@ -90,7 +90,7 @@ function chunk_dir_prep { mkdir -p ${dataDir}/log/chunkserver$i done } -#格式化磁盘文件系统 +# Format Disk File System function disk_format { for disk in ${!disk_map[@]} do @@ -99,7 +99,7 @@ function disk_format { done } -#将创建好的数据目录按照顺序挂载到格式化好的磁盘上,并记录挂载信息到mount.info +# Mount the created data directory onto the formatted disk in order and record the mounting information to mount-info function mount_dir { while [ 1 ] do @@ -128,7 +128,7 @@ function mount_dir { lsblk > ./mount.info } -#持久化挂载信息到fstab文件,防止系统重启后丢失 +# Persist mounting information to fstab file to prevent loss after system restart function fstab_record { grep curvefs /etc/fstab if [ $? -ne 0 ] @@ -141,7 +141,7 @@ function fstab_record { fi } -#将当前的uuid持久化到磁盘上做备份,防止系统重启后uuid发生变化 +# Persist the current uuid to disk for backup to prevent changes in uuid after system restart function meta_record { grep curvefs /etc/fstab if [ $? -eq 0 ] @@ -158,7 +158,7 @@ function meta_record { fi } -#初始化chunkfile pool +# Initialize chunkfile pool function chunkfile_pool_prep { ret=`lsblk|grep chunkserver|wc -l` for i in `seq 0 $((${ret}-1))` @@ -224,20 +224,20 @@ function deploy_all { function deploy_one { local diskname=$1 local dirname=$2 - #目录不存在 + # Directory does not exist if [ ! -d $dirname ] then echo "$dirname is not exist!" exit 1 fi - #磁盘正在挂载使用 + # Disk is being mounted for use mount | grep -w $diskname if [ $? -eq 0 ] then echo "$diskname is being used" exit 1 fi - #目录正在挂载使用 + # Directory is being mounted for use mount | grep -w $dirname if [ $? -eq 0 ] then @@ -265,7 +265,7 @@ function deploy_one { done mount $diskname $dirname lsblk > ./mount.info - #更新fstab + # Update fstab short_diskname=`echo $diskname|awk -F"/" '{print $3}'` ls -l /dev/disk/by-uuid|grep -w $short_diskname if [ $? -ne 0 ] @@ -275,12 +275,12 @@ function deploy_one { fi uuid=`ls -l /dev/disk/by-uuid/|grep -w ${short_diskname}|awk '{print $9}'` echo "UUID=$uuid $dirname ext4 rw,errors=remount-ro 0 0" >> /etc/fstab - # 将uuid及其md5写到diskmeta中 + # Write uuid and its md5 to diskmeta uuidmd5=`echo -n $uuid | md5sum | cut -d ' ' -f1` touch $dirname/disk.meta echo "uuid=$uuid" > $dirname/disk.meta echo "uuidmd5=$uuidmd5" >> $dirname/disk.meta - #格式化chunkfile pool + # Format chunkfile pool curve-format -allocatePercent={{ chunk_alloc_percent }} \ -filePoolDir=$dirname/chunkfilepool \ diff --git a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 index 6c0b36c932..9aadcb311f 100644 --- a/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/etcd-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# 默认配置文件 +# Default configuration file confPath={{ etcd_config_path }} -# 日志文件目录 +# Log file directory logDir={{ etcd_log_dir }} -# 日志文件路径 +# Log file path logPath=${logDir}/etcd.log # pidfile @@ -15,9 +15,9 @@ pidFile=${HOME}/etcd.pid # daemon log daemonLog=${logDir}/daemon-etcd.log -# 启动etcd +# Start etcd function start_etcd() { - # 创建logDir + # Create logDir mkdir -p ${logDir} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -25,14 +25,14 @@ function start_etcd() { exit 1 fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! 
-w ${logDir} ] then echo "Write permission denied: ${logDir}" exit 1 fi - # 检查logPath是否可写或者是否能够创建 + # Check if logPath is writable or can be created touch ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -40,7 +40,7 @@ function start_etcd() { exit fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -48,28 +48,28 @@ function start_etcd() { exit fi - # 检查daemon + # Check the daemon if ! type daemon &> /dev/null then echo "No daemon installed" exit fi - # 检查是否安装etcd + # Check if etcd is installed if [ -z `command -v etcd` ] then echo "No etcd installed" exit fi - # 检查配置文件 + # Check configuration file if [ ! -f ${confPath} ] then echo "Not found confFile, Path is ${confPath}" exit fi - # 判断是否已经通过daemon启动了etcd + # Determine if ETCD has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -77,8 +77,8 @@ function start_etcd() { exit fi - # pidfile不存在 或 daemon进程不存在 - # 启动daemon,切换路径,并启动etcd + # The pidfile does not exist or the daemon process does not exist + # Start the daemon, switch paths, and start ETCD daemon --name etcd --core \ @@ -90,9 +90,9 @@ function start_etcd() { -- {{ install_etcd_dir }}/etcd --config-file ${confPath} } -# 停止daemon进程和etcd +# Stop the daemon process and ETCD function stop_etcd() { - # 判断是否已经通过daemon启动了etcd + # Determine if ETCD has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -112,7 +112,7 @@ function stop_etcd() { # restart function restart_etcd() { - # 判断是否已经通过daemon启动了etcd + # Determine if ETCD has been started through daemon daemon --name etcd --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -127,7 +127,7 @@ function restart_etcd() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " etcd-daemon start -- start deamon process and watch on etcd process" @@ -139,7 +139,7 @@ function usage() { echo " etcd-daemon start -c /etcd/etcd.conf.yml -l ${HOME}/etcd.log" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ] then usage @@ -150,7 +150,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parsing parameters while [[ $# -gt 1 ]] do key=$1 @@ -176,11 +176,11 @@ case $1 in start_etcd ;; "stop") - # 停止daemon和etcd进程 + # Stop the daemon and etcd processes stop_etcd ;; "restart") - # 重启etcd + # Restart etcd restart_etcd ;; *) diff --git a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 index 6d69e6d47d..81f55b7ed7 100644 --- a/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/mds-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# curve-mds路径 +# curve-mds path curveBin={{ curve_bin_dir }}/curve-mds -# 默认配置文件 +# Default configuration file confPath={{ mds_config_path }} -# 日志文件路径 +# Log file path logPath={{ mds_log_dir }} # mdsAddr @@ -28,30 +28,30 @@ function ip_value() { }' } -# 启动mds +# Start mds function start_mds() { - # 检查daemon + # Check the daemon if ! type daemon &> /dev/null then echo "No daemon installed" exit 1 fi - # 检查curve-mds + # Check curve-mds if [ ! -f ${curveBin} ] then echo "No curve-mds installed" exit 1 fi - # 检查配置文件 + # Check configuration file if [ ! 
-f ${confPath} ] then echo "Not found mds.conf, Path is ${confPath}" exit 1 fi - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -59,7 +59,7 @@ function start_mds() { exit fi - # 创建logPath + # Create logPath mkdir -p ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -67,14 +67,14 @@ function start_mds() { exit 1 fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! -w ${logPath} ] then echo "Write permission denied: ${logPath}" exit 1 fi - # 检查consoleLog是否可写或者是否能够创建 + # Check if consoleLog is writable or can be created touch ${consoleLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -82,7 +82,7 @@ function start_mds() { exit 1 fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -90,7 +90,7 @@ function start_mds() { exit 1 fi - # 未指定mdsAddr, 从配置文件中解析出网段 + # No mdsAddr specified, resolving network segment from configuration file if [ -z ${mdsAddr} ] then subnet=`cat $confPath|grep global.subnet|awk -F"=" '{print $2}'` @@ -101,11 +101,11 @@ function start_mds() { ip= echo "subnet: $subnet" echo "port: $port" - # 对prefix再取一次模,为了支持10.182.26.50/22这种格式 + # Take the module again for the prefix to support the format 10.182.26.50/22 prefix=$(($prefix&$mask)) for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"` do - # 把ip转换成整数 + # Convert IP to an integer ip_int=$(ip_value $i) if [ $(($ip_int&$mask)) -eq $prefix ] then @@ -132,9 +132,9 @@ function start_mds() { show_status } -# 停止daemon进程,且停止curve-mds +# Stop the daemon process and stop the curve-mds function stop_mds() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -154,7 +154,7 @@ function stop_mds() { # restart function restart_mds() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -171,7 +171,7 @@ function restart_mds() { # show status function show_status() { - # 判断是否已经通过daemon启动了curve-mds + # Determine if curve-mds has been started through daemon daemon --name curve-mds --pidfile ${pidFile} --running if [ $? 
-ne 0 ] then @@ -179,11 +179,11 @@ function show_status() { exit 1 fi - # 查询leader的IP + # Query the IP of the leader leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1` - # 如果load mds configuration之后的日志,没有leader相关日志 - # 那么leaderAddr为空, mds应该没有起来 + # If there are no leader related logs in the logs after load mds configuration + # So leaderAddr is empty, and mds should not be up if [ -z ${leaderAddr} ] then echo "MDS may not start successfully, check log" @@ -194,7 +194,7 @@ function show_status() { then echo "Current MDS is LEADER" else - # 查询是否和自身ip相等 + # Check if it is equal to its own IP address for ip in `(hostname -I)` do if [ ${leaderAddr} = ${ip} ] @@ -208,7 +208,7 @@ function show_status() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " ./mds-daemon.sh start -- start deamon process and watch on curve-mds process" @@ -222,7 +222,7 @@ function usage() { echo " ./mds-daemon.sh start -c /etc/curve/mds.conf -l ${HOME}/ -a 127.0.0.1:6666" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ] then usage @@ -233,7 +233,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parsing parameters while [[ $# -gt 1 ]] do key=$1 diff --git a/curve-ansible/roles/install_package/templates/nebd-daemon.j2 b/curve-ansible/roles/install_package/templates/nebd-daemon.j2 index 50bdc2a07e..d170963075 100644 --- a/curve-ansible/roles/install_package/templates/nebd-daemon.j2 +++ b/curve-ansible/roles/install_package/templates/nebd-daemon.j2 @@ -133,7 +133,7 @@ function stop_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -174,7 +174,7 @@ function restart_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -262,7 +262,7 @@ function status() { done } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " nebd-daemon start -- start deamon process and watch on nebd-server process for all instance" @@ -278,7 +278,7 @@ function usage() { echo " nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ]; then usage exit diff --git a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 index 4d7edae130..169ff2b84d 100644 --- a/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 +++ b/curve-ansible/roles/install_package/templates/snapshot-daemon.sh.j2 @@ -1,12 +1,12 @@ #!/bin/bash -# curve-snapshotcloneserver路径 +# curve-snapshotcloneserver path curveBin={{ curve_bin_dir }}/curve-snapshotcloneserver -# 默认配置文件 +# Default configuration file confPath={{ snapshot_config_path }} -# 日志文件路径 +# Log file path logPath={{ snapshot_clone_server_log_dir }} # serverAddr @@ -28,30 +28,30 @@ function ip_value() { }' } -# 启动snapshotcloneserver +# Starting snapshotcloneserver function start_server() { - # 检查daemon + # Check the daemon if ! 
type daemon &> /dev/null then echo "No daemon installed" exit fi - # 检查curve-snapshotcloneserver + # Check the curve-snapshotcloneserver if [ ! -f ${curveBin} ] then echo "No curve-snapshotcloneserver installed, Path is ${curveBin}" exit fi - # 检查配置文件 + # Check configuration file if [ ! -f ${confPath} ] then echo "Not found snapshot_clone_server.conf, Path is ${confPath}" exit fi - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -eq 0 ] then @@ -59,7 +59,7 @@ function start_server() { exit fi - # 创建logPath + # Create logPath mkdir -p ${logPath} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -67,14 +67,14 @@ function start_server() { exit fi - # 检查logPath是否有写权限 + # Check if logPath has write permission if [ ! -w ${logPath} ] then echo "Write permission denied: ${logPath}" exit 1 fi - # 检查consoleLog是否可写或者能否创建,初始化glog之前的日志存放在这里 + # Check if the consoleLog can be written or created, and store the logs before initializing the glog here touch ${consoleLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -82,7 +82,7 @@ function start_server() { exit fi - # 检查daemonLog是否可写或者是否能够创建 + # Check if the daemonLog is writable or can be created touch ${daemonLog} > /dev/null 2>&1 if [ $? -ne 0 ] then @@ -90,7 +90,7 @@ function start_server() { exit fi - # 未指定serverAddr, 从配置文件中解析出网段 + # No serverAddr specified, resolving network segment from configuration file if [ -z ${serverAddr} ] then subnet=`cat $confPath|grep server.subnet|awk -F"=" '{print $2}'` @@ -101,11 +101,11 @@ function start_server() { ip= echo "subnet: $subnet" echo "port: $port" - # 对prefix再取一次模,为了支持10.182.26.50/22这种格式 + # Take the module again for the prefix to support the format 10.182.26.50/22 prefix=$(($prefix&$mask)) for i in `/sbin/ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"` do - # 把ip转换成整数 + # Convert IP to an integer ip_int=$(ip_value $i) if [ $(($ip_int&$mask)) -eq $prefix ] then @@ -132,9 +132,9 @@ function start_server() { show_status } -# 停止daemon进程和curve-snapshotcloneserver +# Stop the daemon process and curve-snapshotcloneserver function stop_server() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -154,7 +154,7 @@ function stop_server() { # restart function restart_server() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? -ne 0 ] then @@ -171,7 +171,7 @@ function restart_server() { # show status function show_status() { - # 判断是否已经通过daemon启动了curve-snapshotcloneserver + # Determine if the curve-snapshotcloneserver has been started through daemon daemon --name curve-snapshotcloneserver --pidfile ${pidFile} --running if [ $? 
-ne 0 ] then @@ -179,11 +179,11 @@ function show_status() { exit 1 fi - # 查询leader的IP + # Query the IP of the leader leaderAddr=`tac ${consoleLog}|grep -a -m 1 -B 1000000 "Logging before InitGoogleLogging()"|grep "leader"|grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}"|head -n1` - # 如果load configuration之后的日志,没有leader相关日志 - # 那么leaderAddr为空, snapshotcloneserver应该没有起来 + # If there are no leader related logs in the logs after load configuration + # So the leaderAddr is empty, and the snapshotcloneserver should not be up if [ -z ${leaderAddr} ] then echo "SnapshotClone may not start successfully, check log" @@ -194,7 +194,7 @@ function show_status() { then echo "Current SnapshotClone is LEADER" else - # 查询是否和自身ip相等 + # Check if it is equal to its own IP address for ip in `(hostname -I)` do if [ ${leaderAddr} = ${ip} ] @@ -208,7 +208,7 @@ function show_status() { fi } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " snapshot-daemon start -- start deamon process and watch on curve-snapshotcloneserver process" @@ -222,7 +222,7 @@ function usage() { echo " snapshot-daemon start -c /etc/curve/snapshot_clone_server.conf -l ${HOME}/ -a 127.0.0.1:5555" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ] then usage @@ -233,7 +233,7 @@ case $1 in "start") shift # pass first argument - # 解析参数 + # Parsing parameters while [[ $# -gt 1 ]] do key=$1 diff --git a/curve-ansible/roles/install_package/vars/main.yml b/curve-ansible/roles/install_package/vars/main.yml index ee545c1d7b..8967883b7c 100644 --- a/curve-ansible/roles/install_package/vars/main.yml +++ b/curve-ansible/roles/install_package/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 包的名称 +# The name of the package package_name: package_version: lib_installed: false diff --git a/curve-ansible/roles/restart_service/defaults/main.yml b/curve-ansible/roles/restart_service/defaults/main.yml index 061c32a4ec..0051d42ecc 100644 --- a/curve-ansible/roles/restart_service/defaults/main.yml +++ b/curve-ansible/roles/restart_service/defaults/main.yml @@ -16,7 +16,7 @@ # check_health: False -# 启动一个chunkserver需要的最大时间 +# The maximum time required to start a chunkserver restart_chunkserver_async: 100 restart_chunkserver_check_delay: 5 restart_chunkserver_check_times: 20 diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml index d74b05abc7..6b3050bb01 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_mds.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取mds的版本 + # Obtain the version of mds - name: get curve version vars: metric_port: "{{ mds_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml index 73f6bcf636..966d9b95d6 100644 --- a/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml +++ b/curve-ansible/roles/restart_service/tasks/include/restart_snapshotclone.yml @@ -17,7 +17,7 @@ - name: Determine need restart or not block: - # 获取snapshotclone的版本 + # Obtain the version of snapshotclone - name: get snapshotclone version vars: metric_port: "{{ snapshot_dummy_port }}" diff --git a/curve-ansible/roles/restart_service/tasks/main.yml b/curve-ansible/roles/restart_service/tasks/main.yml index befb68b5b3..a8b077a3a4 100644 --- 
a/curve-ansible/roles/restart_service/tasks/main.yml +++ b/curve-ansible/roles/restart_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 重启对应的服务 +# Restart the corresponding service - name: restart_service include_tasks: "include/restart_{{ service_name }}.yml" diff --git a/curve-ansible/roles/restart_service/vars/main.yml b/curve-ansible/roles/restart_service/vars/main.yml index 94f0bad0c6..44f7d6797e 100644 --- a/curve-ansible/roles/restart_service/vars/main.yml +++ b/curve-ansible/roles/restart_service/vars/main.yml @@ -15,7 +15,7 @@ # limitations under the License. # -# 服务的名称 +# Name of service service_name: need_restart: true sudo: "" diff --git a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml index 82478df03e..f2a67fdba1 100644 --- a/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml +++ b/curve-ansible/roles/set_leader_and_follower_list/vars/main.yml @@ -1,4 +1,4 @@ -# 服务的名称 +# Name of service service_name: leader_ip: all_ip: diff --git a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml index 25fecb2337..32602a56cd 100644 --- a/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml +++ b/curve-ansible/roles/start_service/tasks/include/start_chunkserver.yml @@ -27,7 +27,7 @@ poll: "{{ service_poll }}" failed_when: start_chunkserver_res.rc != 0 or "down" in start_chunkserver_res.stdout -# 打印控制台输出 +# Print Console Output - name: print console output debug: var: start_chunkserver_res.stdout_lines diff --git a/curve-ansible/roles/start_service/tasks/main.yml b/curve-ansible/roles/start_service/tasks/main.yml index 483dfd5d9a..be93405394 100644 --- a/curve-ansible/roles/start_service/tasks/main.yml +++ b/curve-ansible/roles/start_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Start the corresponding service - name: start_service include_tasks: "include/start_{{ service_name }}.yml" diff --git a/curve-ansible/roles/stop_service/tasks/main.yml b/curve-ansible/roles/stop_service/tasks/main.yml index 0b2bbb486e..d3b8cbd018 100644 --- a/curve-ansible/roles/stop_service/tasks/main.yml +++ b/curve-ansible/roles/stop_service/tasks/main.yml @@ -15,6 +15,6 @@ # limitations under the License. # -# 启动对应的服务 +# Start the corresponding service - name: stop_service include_tasks: "include/stop_{{ service_name }}.yml" diff --git a/curve-ansible/rolling_update_curve.yml b/curve-ansible/rolling_update_curve.yml index fddd6832bf..61949f9f8f 100644 --- a/curve-ansible/rolling_update_curve.yml +++ b/curve-ansible/rolling_update_curve.yml @@ -83,7 +83,7 @@ - { role: generate_config, template_name: topo.json, conf_path: "{{ topo_file_path }}", tags: ["generate_config", "generage_topo_json"] } -# 获取leader节点和follower节点 +# Obtain the leader and follower nodes - name: set mds leader and follower list hosts: mds gather_facts: no @@ -95,7 +95,7 @@ roles: - { role: set_leader_and_follower_list, service_name: mds } -# 按顺序先升级follower节点,再升级leader节点 +# Upgrade the follower node first in order, and then upgrade the leader node - name: update follower and leader server in sequence hosts: mds_servers_followers, mds_servers_leader any_errors_fatal: true @@ -110,14 +110,14 @@ - pause: prompt: "Confirm restart mds in {{ inventory_hostname }}. 
ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启mds + # Restart mds roles: - { role: restart_service, service_name: mds, expected_curve_version: "{{ mds_package_version }}", command_need_sudo: "{{ mds_need_sudo | bool }}"} ############################## rolling update chunkserver ############################## -# 1. 更新各节点上的配置 +# 1. Update the configuration on each node - name: prepare chunkserver hosts: chunkservers any_errors_fatal: true @@ -136,8 +136,8 @@ - { role: generate_config, template_name: s3.conf, conf_path: "{{ chunkserver_s3_config_path }}", tags: ["generate_config", "generage_cs_s3_conf"] } -# 逐个重启chunkserver,每重启完一个需要等待copyset健康 -# 继续操作下一个的的时候还需要一个命令行交互确认 +# Restart the chunkservers one by one, and wait for the copyset to be healthy after each restart +# When continuing with the next operation, a command line interaction confirmation is also required - name: restart chunkserver and wait healthy hosts: chunkservers any_errors_fatal: true @@ -203,7 +203,7 @@ - pause: prompt: "Confirm restart snapshotclone in {{ inventory_hostname }}. ENTER to continue or CTRL-C A to quit" when: need_confirm | bool - # 重启snapshot clone + # Restart snapshot clone roles: - { role: restart_service, service_name: snapshotclone, expected_curve_version: "{{ snapshot_package_version }}", command_need_sudo: "{{ snapshot_need_sudo | bool }}" } diff --git a/curve-ansible/server.ini b/curve-ansible/server.ini index eaca5a4515..7e06fbe105 100644 --- a/curve-ansible/server.ini +++ b/curve-ansible/server.ini @@ -14,8 +14,8 @@ localhost ansible_ssh_host=127.0.0.1 [zone1] localhost ansible_ssh_host=127.0.0.1 -# 请确保zone内机器数量一致,如果有多个zone,则在上面根据zone1格式增加zone2,zone3...即可。 -# 如果zone下面有多个机器,则换行一起列出来即可。比如: +# Please ensure that the number of machines in the zone is consistent. If there are multiple zones, add zone2, zone3... based on the zone1 format above. +# If there are multiple machines under the zone, they can be listed together in a new line. 
For example: # [zone1] # localhost ansible_ssh_host=127.0.0.1 # localhost2 ansible_ssh_host=127.0.0.2 @@ -32,7 +32,7 @@ mds_subnet=127.0.0.1/22 defined_healthy_status="cluster is healthy" mds_package_version="0.0.6.1+160be351" tool_package_version="0.0.6.1+160be351" -# 启动命令是否用sudo +# Whether to use sudo for startup command mds_need_sudo=True mds_config_path=/etc/curve/mds.conf mds_log_dir=/data/log/curve/mds @@ -90,7 +90,7 @@ chunkserver_subnet=127.0.0.1/22 global_enable_external_server=True chunkserver_external_subnet=127.0.0.1/22 chunkserver_s3_config_path=/etc/curve/cs_s3.conf -# chunkserver使用的client相关的配置 +# Client related configurations used by chunkserver chunkserver_client_config_path=/etc/curve/cs_client.conf client_register_to_mds=False client_chunkserver_op_max_retry=3 @@ -149,10 +149,10 @@ sudo_or_not=True ansible_become_user=curve ansible_become_flags=-iu curve update_config_with_puppet=False -# 启动服务要用到ansible的异步操作,否则ansible退出后chunkserver也会退出 -# 异步等待结果的总时间 +# Starting the service requires the asynchronous operation of ansible, otherwise the chunkserver will also exit after ansible exits +# Total time waiting for results asynchronously service_async=5 -# 异步查询结果的间隔 +# Interval between asynchronous query results service_poll=1 install_with_deb=False restart_directly=False diff --git a/curvefs/conf/curvebs_client.conf b/curvefs/conf/curvebs_client.conf index e0eb4d70f2..23fc37b087 100644 --- a/curvefs/conf/curvebs_client.conf +++ b/curvefs/conf/curvebs_client.conf @@ -1,29 +1,29 @@ # -################### mds一侧配置信息 ################## +################### MDS side configuration information################## # -# mds的地址信息,对于mds集群,地址以逗号隔开 +# Address information for mds, separated by commas for mds clusters mds.listen.addr=127.0.0.1:6666 -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS=true -# 与mds通信的rpc超时时间 +# RPC timeout for communication with mds mds.rpcTimeoutMS=500 -# 与mds通信rpc最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout for rpc communication with mds, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 与mds通信重试总时间 +# Total retry time for communication with mds mds.maxRetryMS=8000 -# 在当前mds上连续重试次数超过该限制就切换, 这个失败次数包含超时重试次数 +# Switch if the number of consecutive retries on the current mds exceeds this limit, which includes the number of timeout retries mds.maxFailedTimesBeforeChangeMDS=2 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # The normal retry times for trigger wait strategy @@ -36,123 +36,123 @@ mds.maxRetryMsInIOPath=86400000 mds.waitSleepMs=10000 # -################# metacache配置信息 ################ +################# metacache Configuration Information ################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=500 -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry=5 -# 获取leader接口每次重试之前需要先睡眠一段时间 +# Obtaining the leader interface requires a period of sleep before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +############### Configuration information of the scheduling layer ############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue 
can affect the overall throughput of the client; this queue holds asynchronous IO tasks. schedule.queueCapacity=1000000 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of execution threads in the queue +# An execution thread simply takes an IO task off the queue, sends it to the network and then returns to fetch the next task. From the time a task is +# taken off the queue until its rpc request has been sent takes roughly 20us-100us: about 20us in the normal case where the leader does not need to be fetched, +# and about 100us if the leader has to be fetched during sending, so a single thread sustains a throughput of roughly 100k-500k requests, +# which already meets the performance requirements schedule.threadpoolSize=2 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# A task queue introduced to isolate the QEMU-side thread, since there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, the call is simply pushed onto the task queue and returns immediately, +# so libcurve does not occupy QEMU's thread and does not block its asynchronous calls isolation.taskQueueCapacity=1000000 -# 隔离qemu线程的任务队列线程池大小, 默认值为1个线程 +# Thread pool size of the task queue that isolates the QEMU thread; the default is 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Configuration related to communication with chunkserver ############# # -# 读写接口失败的OP之间重试睡眠 +# Sleep interval between retries of failed read/write OPs chunkserver.opRetryIntervalUS=100000 -# 失败的OP重试次数 +# Maximum number of retries for a failed OP chunkserver.opMaxRetry=2500000 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 开启基于appliedindex的读,用于性能优化 +# Enable reading based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead=1 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retried requests +# When the network is congested or the chunkserver is overloaded, the sleep time needs to be increased, +# up to at most maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# Maximum RPC timeout for retried requests; the timeout follows an exponential backoff strategy +# When timeouts occur because the network is congested, the RPC timeout needs to be increased, +# up to at most maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check is triggered, and if the health check fails, the chunkserver is marked as unstable chunkserver.maxStableTimeoutTimes=10 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# Timeout of the health check request issued after consecutive RPC timeouts on a chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# Once the number of unstable chunkservers on the same server exceeds this value, +# all chunkservers on that server are marked as unstable chunkserver.serverStableThreshold=3 -# 当底层chunkserver压力大时,可能也会触发unstable -# 
由于copyset leader may change,会导致请求超时时间设置为默认值,从而导致IO hang -# 真正宕机的情况下,请求重试一定次数后会处理完成 -# 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 -# 当一个请求重试次数超过这个值时,其超时时间一定进入指数退避 +# When the underlying chunkserver is under heavy pressure, unstable may also be triggered +# Because the copyset leader may change, the request timeout would then be reset to the default value, which can cause IO to hang +# If the chunkserver is really down, the request completes after a certain number of retries +# If retries keep happening, it is not a downtime case, and the timeout should still follow the exponential backoff logic +# When the number of retries of a request exceeds this value, its timeout always enters exponential backoff chunkserver.minRetryTimesForceTimeoutBackoff=5 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When the number of retries of an RPC exceeds maxRetryTimesBeforeConsiderSuspend, +# the IO is recorded as suspended and the metric raises an alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 # -################# 文件级别配置项 ############# +################# File level configuration items ############# # -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# Maximum number of outstanding rpcs allowed by libcurve's underlying rpc scheduling; the inflight RPCs of each file are counted independently global.fileMaxInFlightRPCNum=128 -# 文件IO下发到底层chunkserver最大的分片KB +# Maximum split size in KB when file IO is issued to the underlying chunkserver global.fileIOSplitMaxSizeKB=64 # -################# log相关配置 ############### +################# Log related configuration ############### # # enable logging or not global.logging.enable=True # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=/data/log/curve/ # __CURVEADM_TEMPLATE__ /curvebs/client/logs __CURVEADM_TEMPLATE__ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -################# 读源卷相关配置 ############### +################# Configuration related to reading the source volume ############### # -# 读取源卷时打开的fd超时关闭时间300s +# An fd opened for reading the source volume is closed after a 300s timeout closefd.timeout=300 -# 读取源卷时打开的fd后台线程每600s扫描一遍fdMap,关闭超时fd +# A background thread scans the fdMap every 600s and closes the timed-out fds opened for reading the source volume closefd.timeInterval=600 # -############### metric 配置信息 ############# +############### metric configuration information ############# # global.metricDummyServerStartPort=9000 -# 是否关闭健康检查: true/关闭 false/不关闭 +# Whether to disable the health check: true/disabled false/enabled global.turnOffHealthCheck=true # diff --git a/curvefs/monitor/grafana-report.py b/curvefs/monitor/grafana-report.py index 016473a509..d9baf964a0 100644 --- a/curvefs/monitor/grafana-report.py +++ b/curvefs/monitor/grafana-report.py @@ -13,7 +13,7 @@ sender = 'Grafana' to_address = ['xxxxxxxxx@163.com'] username = 'xxxxxxxxx@163.com' -password = 'xxxxxxxxx' # SMTP授权码 +password = 'xxxxxxxxx' # SMTP authorization code smtpserver = 'xxxx.163.com:1234' sourcefile= '/etc/curvefs/monitor/grafana/report/report.tex' imagedir= '/etc/curvefs/monitor/grafana/report/images/' @@ -60,33 +60,33 @@ def attach_body(msgRoot): html_str = '%s' % (image_body) mailMsg = """ -

可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)

-

grafana链接

+

You can click the following link to view the dashboard in Grafana (if displayed incorrectly, please check the attached PDF).

+

grafana link

""" % (grafanauri) mailMsg += html_str print(mailMsg) content = MIMEText(mailMsg,'html','utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Sending dashboard daily report email def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) dt = Time.strftime("%Y%m%d",time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % (clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join( to_address ) # Send to multiple recipients - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 + # The filename here can be anything, whatever name is written will be displayed in the email pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add the body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) diff --git a/curvefs/monitor/grafana/provisioning/dashboards/mds.json b/curvefs/monitor/grafana/provisioning/dashboards/mds.json index 09de6b31f7..a90a8f13c0 100644 --- a/curvefs/monitor/grafana/provisioning/dashboards/mds.json +++ b/curvefs/monitor/grafana/provisioning/dashboards/mds.json @@ -290,7 +290,7 @@ { "columns": [], "datasource": null, - "description": "mds的配置", + "description": "Configuration of MDS", "fieldConfig": { "defaults": { "custom": { @@ -336,7 +336,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -352,7 +352,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -368,7 +368,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", diff --git a/curvefs/src/mds/metaserverclient/metaserver_client.cpp b/curvefs/src/mds/metaserverclient/metaserver_client.cpp index 739704f62a..2eb4577988 100644 --- a/curvefs/src/mds/metaserverclient/metaserver_client.cpp +++ b/curvefs/src/mds/metaserverclient/metaserver_client.cpp @@ -372,7 +372,7 @@ FSStatusCode MetaserverClient::DeleteInode(uint32_t fsId, uint64_t inodeId) { request.set_partitionid(0); request.set_fsid(fsId); request.set_inodeid(inodeId); - // TODO(@威姐): 适配新的proto + // TODO(@ Wei Jie): Adapt to the new proto request.set_copysetid(1); request.set_poolid(1); request.set_partitionid(1); diff --git a/curvefs/src/metaserver/copyset/conf_epoch_file.h b/curvefs/src/metaserver/copyset/conf_epoch_file.h index abe14f2f8b..59bd9660b2 100644 --- a/curvefs/src/metaserver/copyset/conf_epoch_file.h +++ b/curvefs/src/metaserver/copyset/conf_epoch_file.h @@ -40,29 +40,29 @@ class ConfEpochFile { public: explicit ConfEpochFile(curve::fs::LocalFileSystem* fs) : fs_(fs) {} - /** - * 加载快照文件中的配置版本 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本,出参,返回读取的epoch值 - * @return 0,成功; -1失败 + /** + * Load the configuration version in the snapshot file + * @param path: File path + * @param logicPoolID: Logical Pool ID + * @param copysetID: Copy group ID + * @param epoch: Configuration version, output parameters, return the read epoch value + * @return 0, successful; - 1 failed */ int Load(const std::string& path, PoolId* poolId, CopysetId* copysetId, uint64_t* epoch); - /** - * 
保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二 - * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘 - * | head | 配置版本信息 | - * | 8 bytes size_t | uint32_t | 变 长文本 | - * | length | crc32 | logic pool id | copyset id | epoch | - * 上面的持久化使用 ‘:’ 分隔 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本 - * @return 0成功; -1失败 + /** + * Serialize the configuration version information and save it to the snapshot file. The format is as follows: + * 'head' indicates the length and is stored in binary; everything else is in text format so it can be inspected directly when necessary. 'sync' ensures the data reaches the disk. + * | head | configuration version information | + * | 8 bytes size_t | uint32_t | variable-length text | + * | length | crc32 | logic pool id | copyset id | epoch | + * The fields above are separated by ':' when persisted + * @param path: File path + * @param logicPoolID: Logical Pool ID + * @param copysetID: Copy group ID + * @param epoch: Configuration version + * @return 0 on success; -1 on failure */ int Save(const std::string& path, const PoolId poolId, const CopysetId copysetId, const uint64_t epoch); diff --git a/curvefs/src/metaserver/inflight_throttle.h b/curvefs/src/metaserver/inflight_throttle.h index fb670b6161..dfbe50bebf 100644 --- a/curvefs/src/metaserver/inflight_throttle.h +++ b/curvefs/src/metaserver/inflight_throttle.h @@ -30,7 +30,7 @@ namespace curvefs { namespace metaserver { /** - * 负责控制最大inflight request数量 + * Responsible for controlling the maximum number of inflight requests */ class InflightThrottle { public: @@ -40,8 +40,8 @@ class InflightThrottle { ~InflightThrottle() = default; /** - * @brief: 判断是否过载 - * @return true,过载,false没有过载 + * @brief: Determine whether the server is overloaded + * @return true if overloaded, false otherwise */ bool IsOverLoad() { if (maxInflightRequest_ >= @@ -53,23 +53,23 @@ class InflightThrottle { } /** - * @brief: inflight request计数加1 + * @brief: Increase the inflight request count by 1 */ void Increment() { inflightRequestCount_.fetch_add(1, std::memory_order_relaxed); } /** - * @brief: inflight request计数减1 + * @brief: Decrease the inflight request count by 1 */ void Decrement() { inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed); } private: - // 当前inflight request数量 + // Current number of inflight requests std::atomic inflightRequestCount_; - // 最大的inflight request数量 + // Maximum number of inflight requests const uint64_t maxInflightRequest_; }; diff --git a/curvefs/test/mds/schedule/coordinator_test.cpp b/curvefs/test/mds/schedule/coordinator_test.cpp index e759da89ed..96045b8d58 100644 --- a/curvefs/test/mds/schedule/coordinator_test.cpp +++ b/curvefs/test/mds/schedule/coordinator_test.cpp @@ -149,20 +149,20 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + // Issue the configuration for the first time ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取metaserver失败 + // Failed to obtain the metaserver the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. 
Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -180,7 +180,7 @@ TEST_F(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + //Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); @@ -289,21 +289,21 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE( coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); - // 第一次下发配置 + //First configuration distribution ASSERT_EQ(4, coordinator_->CopySetHeartbeat(testCopySetInfo, ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取metaserver失败 + //Failed to obtain metaserver for the second time ASSERT_EQ(UNINITIALIZE_ID, coordinator_->CopySetHeartbeat(testCopySetInfo, ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + //3. Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter_, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -321,7 +321,7 @@ TEST_F(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { coordinator_->GetOpController()->GetOperatorById(info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到metaserver的信息 + //Unable to obtain information on metaserver ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(_, _)) .WillOnce(Return(false)); @@ -389,12 +389,12 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { std::make_shared(topo_)); { - // 1. copyset上没有要变更的operator + //1. There are no operators to change on the copyset ASSERT_FALSE(coordinator_->MetaserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为metaserver-1 + //2. There is a leader change on the copyset and the target leader is metaserver-1 Operator testOperator( 1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2, 1)); @@ -403,7 +403,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 3. copyset上有remove peer操作 + //3. There is a remove peer operation on the copyset Operator testOperator( 1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -412,7 +412,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 4. copyset上有add peer操作, target不是1 + //4. There is an add peer operation on the copyset, but the target is not 1 Operator testOperator( 1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(2)); @@ -421,7 +421,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 5. copyset上有add peer操作, target是1 + //5. There is an add peer operation on the copyset, with a target of 1 Operator testOperator( 1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(1)); @@ -430,7 +430,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 6. copyset上有change peer操作,target不是1 + //6. 
There is a change peer operation on the copyset, but the target is not 1 Operator testOperator( 1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 2)); @@ -439,7 +439,7 @@ TEST_F(CoordinatorTest, test_MetaserverGoingToAdd) { } { - // 7. copyset上有change peer操作,target是1 + //7. There is a change peer operation on the copyset, with a target of 1 Operator testOperator( 1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, steady_clock::now(), std::make_shared(4, 1)); @@ -459,7 +459,7 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { EXPECT_CALL(*topoAdapter_, Getpools()).Times(0); EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()).Times(0); - // 设置flag都为false + //Set flags to false gflags::SetCommandLineOption("enableCopySetScheduler", "false"); gflags::SetCommandLineOption("enableRecoverScheduler", "false"); gflags::SetCommandLineOption("enableLeaderScheduler", "false"); @@ -471,18 +471,18 @@ TEST_F(CoordinatorTest, test_SchedulerSwitch) { TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { /* - 场景: - metaserver1: offline 有恢复op - metaserver2: offline 没有恢复op,没有candidate,有其他op - metaserver3: offline 有candidate + Scenario: + metaserver1: offline has recovery op + metaserver2: offline has no recovery op, no candidate, and other op + metaserver3: offline has a candidate metaserver4: online metaserver4: online */ - // 获取option + //Get option ScheduleOption scheduleOption = GetFalseScheduleOption(); coordinator_->InitScheduler(scheduleOption, metric_); - // 构造metaserver + //Construct metaserver std::vector metaserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { @@ -497,7 +497,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op + //Construct op Operator opForCopySet1(1, CopySetKey{1, 1}, OperatorPriority::HighPriority, steady_clock::now(), std::make_shared(1, 4)); @@ -508,7 +508,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { steady_clock::now(), std::make_shared(2, 4)); ASSERT_TRUE(coordinator_->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + //Construct a copyset std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); CopySetInfo copyset2(CopySetKey{1, 2}, 1, 4, peersFor2, ConfigChangeInfo{}); @@ -523,7 +523,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { CopySetInfo copyset3(CopySetKey{1, 3}, 1, 4, peersFor3, configChangeInfoForCS3); - // 1. 查询所有metaserver + //1. Query all metaservers { EXPECT_CALL(*topoAdapter_, GetMetaServerInfos()) .WillOnce(Return(metaserverInfos)); @@ -545,7 +545,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定metaserver, 但metaserver不存在 + //2. Query specified metaserver, but metaserver does not exist { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(7, _)) .WillOnce(Return(false)); @@ -556,7 +556,7 @@ TEST_F(CoordinatorTest, test_QueryMetaServerRecoverStatus) { std::vector{7}, &statusMap)); } - // 3. 查询指定metaserver, 不在恢复中 + //3. Query specified metaserver, not in recovery { EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(6, _)) .WillOnce( diff --git a/curvefs/test/mds/schedule/operatorStep_test.cpp b/curvefs/test/mds/schedule/operatorStep_test.cpp index d6378bb927..339c870c29 100644 --- a/curvefs/test/mds/schedule/operatorStep_test.cpp +++ b/curvefs/test/mds/schedule/operatorStep_test.cpp @@ -237,7 +237,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer = std::make_shared(3, 4); CopySetConf copySetConf; - // 1. 
change peer还未开始 + // 1. The change peer has not yet started { ASSERT_EQ(ApplyStatus::Ordered, changePeer->Apply(originCopySetInfo, &copySetConf)); @@ -247,7 +247,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { } auto testCopySetInfo = originCopySetInfo; - // 2. change peer完成 + // 2. The change peer has completed { auto testCopySetInfo = originCopySetInfo; testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2); @@ -257,7 +257,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 3. change peer失败 + // 3. The change peer failed { testCopySetInfo = originCopySetInfo; testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000); @@ -277,7 +277,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 4. 上报未完成 + // 4. The reported change is not finished { testCopySetInfo.configChangeInfo.set_finished(false); testCopySetInfo.configChangeInfo.release_err(); @@ -285,7 +285,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer->Apply(testCopySetInfo, &copySetConf)); } - // 5. 上报的变更类型和mds中的oprator不相符合 + // 5. The reported change type does not match the operator in MDS { testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(true); diff --git a/curvefs/test/mds/schedule/recoverScheduler_test.cpp b/curvefs/test/mds/schedule/recoverScheduler_test.cpp index d48c6a9ee1..ec52a30628 100644 --- a/curvefs/test/mds/schedule/recoverScheduler_test.cpp +++ b/curvefs/test/mds/schedule/recoverScheduler_test.cpp @@ -196,7 +196,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInPool(_)) .WillRepeatedly(Return(90)); { - // 1. 所有metaserveronline + // 1. All metaservers are online EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id2, _)) @@ -208,7 +208,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard, and the leader goes down csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetMetaServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); @@ -222,7 +222,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { } { - // 3. 副本数量大于标准,follower挂掉 + // 3. The number of replicas exceeds the standard, and a follower goes down opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; @@ -237,7 +237,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard, and a follower goes down opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); @@ -259,7 +259,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { } { - // 5. 选不出替换metaserver + // 5. Unable to select a replacement metaserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetMetaServersInPool(_)) .WillOnce(Return(std::vector{})); @@ -268,7 +268,7 @@ TEST_F(TestRecoverSheduler, test_all_metaServer_online_offline) { } { - // 6. 在metaserver上创建copyset失败 + // 6. 
Failed to create copyset on metaserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInPool(_)) .WillRepeatedly(Return(3)); std::vector metaserverList( diff --git a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp index 1041519eb6..e2e79e64d7 100644 --- a/curvefs/test/mds/schedule/scheduleMetrics_test.cpp +++ b/curvefs/test/mds/schedule/scheduleMetrics_test.cpp @@ -82,7 +82,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { ::curvefs::mds::topology::CopySetInfo addCsInfo(1, 1); addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. Add operator of normal level/add type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -150,7 +150,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); @@ -167,7 +167,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add high level/remove type operators EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(_)) @@ -245,7 +245,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -263,7 +263,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Increase the operator of the normal level/transferleader type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -338,14 +338,14 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the map scheduleMetrics->UpdateRemoveMetric(transferOp); } } @@ -358,7 +358,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Increase operator of normal level/changePeer type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)) @@ -426,14 +426,14 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { } { - // 2. 移除 1中的operator + // 2. 
Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the map scheduleMetrics->UpdateRemoveMetric(changeOp); } } @@ -446,7 +446,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -459,7 +459,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { << scheduleMetrics->operators[transferOp.copysetID].JsonBody(); scheduleMetrics->UpdateRemoveMetric(transferOp); - // 获取metaserver 或者 server失败 + // Failed to obtain metaserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetMetaServer(1, _)).WillOnce(Return(false)); diff --git a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp index 04241d0209..d5160ebe18 100644 --- a/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/curvefs/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -75,7 +75,7 @@ TEST_F(TestScheduleService, test_QueryMetaServerRecoverStatus) { request.add_metaserverid(1); QueryMetaServerRecoverStatusResponse response; - // 1. 查询metaserver恢复状态返回成功 + // 1. Querying metaserver recovery status returned success { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( @@ -91,7 +91,7 @@ TEST_F(TestScheduleService, test_QueryMetaServerRecoverStatus) { ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 传入的metaserverid不合法 + // 2. 
The metaserverid passed in is illegal { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryMetaServerRecoverStatus( diff --git a/curvefs/test/volume/bitmap_allocator_test.cpp b/curvefs/test/volume/bitmap_allocator_test.cpp index 3eca470fec..502be8c6e7 100644 --- a/curvefs/test/volume/bitmap_allocator_test.cpp +++ b/curvefs/test/volume/bitmap_allocator_test.cpp @@ -225,7 +225,7 @@ TEST_F(BitmapAllocatorTest, TestMarkUsedRandom) { uint64_t off = opt_.startOffset; uint64_t usedSize = 0; - // 对于每一个 size per bit,随机其中一部分设置 + // For each size per bit, randomly set a portion of it auto select = [this, &usedSize](uint64_t startOffset) { auto off = rand_r(&seed) * 4096 % opt_.sizePerBit; auto len = rand_r(&seed) * 4096 % opt_.sizePerBit; diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h index 64109ef8e5..8d8a505775 100644 --- a/curvefs_python/cbd_client.h +++ b/curvefs_python/cbd_client.h @@ -56,15 +56,15 @@ class CBDClient { int Rename(UserInfo_t* info, const char* oldpath, const char* newpath); int Extend(const char* filename, UserInfo_t* info, uint64_t size); - // 同步读写 + // Synchronous read and write int Read(int fd, char* buf, unsigned long offset, unsigned long length); // NOLINT int Write(int fd, const char* buf, unsigned long offset, unsigned long length); // NOLINT - // 异步读写 + // Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx); - // 获取文件的基本信息 + // Obtain basic information about the file int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); diff --git a/curvefs_python/curve_type.h b/curvefs_python/curve_type.h index d6603e238d..c8ab1ed963 100644 --- a/curvefs_python/curve_type.h +++ b/curvefs_python/curve_type.h @@ -35,56 +35,56 @@ #define CURVE_INODE_APPENDECFILE 3 #define CURVE_ERROR_OK 0 -// 文件或者目录已存在 +// The file or directory already exists #define CURVE_ERROR_EXISTS 1 -// 操作失败 +// Operation failed #define CURVE_ERROR_FAILED 2 -// 禁止IO +// Prohibit IO #define CURVE_ERROR_DISABLEIO 3 -// 认证失败 +// Authentication failed #define CURVE_ERROR_AUTHFAIL 4 -// 正在删除 +// Removing #define CURVE_ERROR_DELETING 5 -// 文件不存在 +// File does not exist #define CURVE_ERROR_NOTEXIST 6 -// 快照中 +// In the snapshot #define CURVE_ERROR_UNDER_SNAPSHOT 7 -// 非快照期间 +// During non snapshot periods #define CURVE_ERROR_NOT_UNDERSNAPSHOT 8 -// 删除错误 +// Delete Error #define CURVE_ERROR_DELETE_ERROR 9 -// segment未分配 +// Segment not allocated #define CURVE_ERROR_NOT_ALLOCATE 10 -// 操作不支持 +// Operation not supported #define CURVE_ERROR_NOT_SUPPORT 11 -// 目录非空 +// Directory is not empty #define CURVE_ERROR_NOT_EMPTY 12 -// 禁止缩容 +// Prohibit shrinkage #define CURVE_ERROR_NO_SHRINK_BIGGER_FILE 13 -// session不存在 +// Session does not exist #define CURVE_ERROR_SESSION_NOTEXISTS 14 -// 文件被占用 +// File occupied #define CURVE_ERROR_FILE_OCCUPIED 15 -// 参数错误 +// Parameter error #define CURVE_ERROR_PARAM_ERROR 16 -// MDS一侧存储错误 +// MDS side storage error #define CURVE_ERROR_INTERNAL_ERROR 17 -// crc检查错误 +// CRC check error #define CURVE_ERROR_CRC_ERROR 18 -// request参数存在问题 +// There is an issue with the request parameter #define CURVE_ERROR_INVALID_REQUEST 19 -// 磁盘存在问题 +// There is a problem with the disk #define CURVE_ERROR_DISK_FAIL 20 -// 空间不足 +// Insufficient space #define CURVE_ERROR_NO_SPACE 21 -// IO未对齐 +// IO misalignment #define CURVE_ERROR_NOT_ALIGNED 22 -// 文件被关闭,fd不可用 +// File closed, fd not available #define CURVE_ERROR_BAD_FD 23 -// 
文件长度不支持 +// File length not supported #define CURVE_ERROR_LENGTH_NOT_SUPPORT 24 -// 文件状态 +// File Status #define CURVE_FILE_CREATED 0 #define CURVE_FILE_DELETING 1 #define CURVE_FILE_CLONING 2 @@ -92,7 +92,7 @@ #define CURVE_FILE_CLONED 4 #define CURVE_FILE_BEINGCLONED 5 -// 未知错误 +//Unknown error #define CURVE_ERROR_UNKNOWN 100 #define CURVE_OP_READ 0 diff --git a/curvefs_python/curvefs_tool.py b/curvefs_python/curvefs_tool.py index f2fb582214..2c8277ae2f 100644 --- a/curvefs_python/curvefs_tool.py +++ b/curvefs_python/curvefs_tool.py @@ -26,7 +26,7 @@ kGB = 1024 * 1024 * 1024 kUnitializedFileID = 0 -# 参照curve/include/client/libcurve.h +# Refer to curve/include/client/libcurve.h retCode = { 0 : "OK", 1 : "EXISTS", 2 : "FAILED", @@ -65,17 +65,17 @@ def getRetCodeMsg(ret): return "Unknown Error Code" if __name__ == '__main__': - # 参数解析 + # Parameter parsing args = parser.get_parser().parse_args() - # 初始化client + # Initialize client cbd = curvefs.CBDClient() ret = cbd.Init(args.confpath) if ret != 0: print "init fail" exit(1) - # 获取文件user信息 + # Obtain file user information user = curvefs.UserInfo_t() user.owner = args.user if args.password: diff --git a/curvefs_python/libcurvefs.h b/curvefs_python/libcurvefs.h index 55c6bf55fe..614b0c5344 100644 --- a/curvefs_python/libcurvefs.h +++ b/curvefs_python/libcurvefs.h @@ -38,15 +38,15 @@ int Open4Qemu(const char* filename); int Open(const char* filename, UserInfo_t* info); int Create(const char* filename, UserInfo_t* info, size_t size); -// 同步读写 +// Synchronous read and write int Read(int fd, char* buf, unsigned long offset, unsigned long length); //NOLINT int Write(int fd, const char* buf, unsigned long offset, unsigned long length); //NOLINT -// 异步读写 +// Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx); -// 获取文件的基本信息 +// Obtain basic information about the file int StatFile4Qemu(const char* filename, FileInfo_t* finfo); int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); diff --git a/curvefs_python/test.py b/curvefs_python/test.py index 0f0045fa62..be1f06365e 100644 --- a/curvefs_python/test.py +++ b/curvefs_python/test.py @@ -37,10 +37,10 @@ def exec_cmd(cmd): exec_cmd(cmd) cmd = "curve list --user k8s --dirname /k8s" exec_cmd(cmd) -# 不是root,失败 +# Not root, failed cmd = "curve list --user k8s --dirname /" exec_cmd(cmd) -# root没有传入密码,失败 +# Root did not pass in password, failed cmd = "curve list --user root --dirname /" exec_cmd(cmd) cmd = "curve list --user root --dirname / --password root_password" diff --git a/curvesnapshot_python/libcurveSnapshot.cpp b/curvesnapshot_python/libcurveSnapshot.cpp index 5cdce45219..f7aa511a89 100644 --- a/curvesnapshot_python/libcurveSnapshot.cpp +++ b/curvesnapshot_python/libcurveSnapshot.cpp @@ -48,32 +48,32 @@ class TaskTracker { lastErr_(0) {} /** - * @brief 增加一个追踪任务 + * @brief Add a tracking task */ void AddOneTrace() { concurrent_.fetch_add(1, std::memory_order_acq_rel); } /** - * @brief 获取任务数量 + * @brief Get the number of tasks * - * @return 任务数量 + * @return Number of tasks */ uint32_t GetTaskNum() const { return concurrent_; } /** - * @brief 处理任务返回值 + * @brief processing task return value * - * @param retCode 返回值 + * @param retCode return value */ void HandleResponse(int retCode) { if (retCode < 0) { lastErr_ = retCode; } if (1 == concurrent_.fetch_sub(1, std::memory_order_acq_rel)) { - // 最后一次需拿锁再发信号,防止先发信号后等待导致死锁 + // The last time you need to take 
the lock and send the signal again, to prevent deadlock caused by waiting after sending the signal first std::unique_lock lk(cv_m); cv_.notify_all(); } else { @@ -82,7 +82,7 @@ class TaskTracker { } /** - * @brief 等待追踪的所有任务完成 + * @brief Waiting for all tracked tasks to be completed */ void Wait() { std::unique_lock lk(cv_m); @@ -91,21 +91,21 @@ class TaskTracker { } /** - * @brief 获取最后一个错误 + * @brief Get Last Error * - * @return 错误码 + * @return error code */ int GetResult() { return lastErr_; } private: - // 等待的条件变量 + // Waiting condition variable ConditionVariable cv_; Mutex cv_m; - // 并发数量 + // Concurrent quantity std::atomic concurrent_; - // 错误码 + // Error code int lastErr_; }; diff --git a/curvesnapshot_python/libcurveSnapshot.h b/curvesnapshot_python/libcurveSnapshot.h index bb45a02f57..15c37de042 100644 --- a/curvesnapshot_python/libcurveSnapshot.h +++ b/curvesnapshot_python/libcurveSnapshot.h @@ -73,21 +73,21 @@ typedef struct CChunkIDInfo { type_uInt32_t lpid_; } CChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct CChunkInfoDetail { type_uInt64_t snSize; std::vector chunkSn; } CChunkInfoDetail_t; -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the logicalpool typedef struct CLogicalPoolCopysetIDInfo { type_uInt32_t lpid; type_uInt32_t cpidVecSize; std::vector cpidVec; } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct CSegmentInfo { type_uInt32_t segmentsize; type_uInt32_t chunksize; @@ -113,43 +113,43 @@ typedef struct CFInfo { int Init(const char* path); /** - * 创建快照 - * @param: userinfo是用户信息 - * @param: filename为要创建快照的文件名 - * @param: seq是出参,获取该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Create a snapshot + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is the output parameter to obtain the version information of the file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int CreateSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t* seq); /** - * 删除快照 - * @param: userinfo是用户信息 - * @param: filename为要删除的文件名 - * @param: seq该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Delete snapshot + * @param: userinfo is the user information + * @param: filename is the file name to be deleted + * @param: seq The version information of this file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int DeleteSnapShot(const char* filename, const CUserInfo_t userinfo, type_uInt64_t seq); /** - * 获取快照对应的文件信息 - * @param: userinfo是用户信息 - * @param: filename为对应的文件名 - * @param: seq为该文件打快照时对应的版本信息 - * @param: snapinfo是出参,保存当前文件的基础信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain file information corresponding to the snapshot + * @param: userinfo is the user information + * @param: filename is the corresponding file name + * @param: seq corresponds to the version information when taking a snapshot of the file + * @param: snapinfo is a parameter that saves the basic information of the current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int GetSnapShot(const char* fname, const CUserInfo_t userinfo, type_uInt64_t seq, CFInfo_t* snapinfo); /** - * 获取快照数据segment信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 
- * @param: offset是文件的偏移 - * @param:segInfo是出参,保存当前文件的快照segment信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain snapshot data segment information + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + * @param: offset is the offset of the file + * @param: segInfo is a parameter that saves the snapshot segment information of the current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int GetSnapshotSegmentInfo(const char* filename, const CUserInfo_t userinfo, @@ -158,13 +158,13 @@ int GetSnapshotSegmentInfo(const char* filename, CSegmentInfo *segInfo); /** - * 读取seq版本号的快照数据 - * @param: cidinfo是当前chunk对应的id信息 - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Read snapshot data of seq version number + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is a read buffer + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t seq, @@ -172,37 +172,37 @@ int ReadChunkSnapshot(CChunkIDInfo cidinfo, type_uInt64_t len, char *buf); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param: cidinfo是当前chunk对应的id信息 - * @param: correctedSeq是chunk需要修正的版本 + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correctedSn of the chunk + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: correctedSeq is the version of chunk that needs to be corrected */ int DeleteChunkSnapshotOrCorrectSn(CChunkIDInfo cidinfo, type_uInt64_t correctedSeq); /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param: cidinfo是当前chunk对应的id信息 - * @param: chunkInfo是快照的详细信息 + * Obtain the version information of the chunk, where chunkInfo is the output parameter + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: chunkInfo is the detailed information of the snapshot */ int GetChunkInfo(CChunkIDInfo cidinfo, CChunkInfoDetail *chunkInfo); /** - * 获取快照状态 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 + * Get snapshot status + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information */ int CheckSnapShotStatus(const char* filename, const CUserInfo_t userinfo, type_uInt64_t seq, type_uInt32_t* filestatus); /** - * 获取快照分配信息 - * @param: filename是当前文件名 - * @param: offset是当前的文件偏移 - * @param: segmentsize为segment大小 + * Obtain snapshot allocation information + * @param: filename is the current file name + * @param: offset is the current file offset + * @param: segmentsize is the segment size * @param: chunksize - * @param: userinfo是用户信息 - * @param[out]: segInfo是出参 + * @param: userinfo is the user information + * @param[out]: segInfo is the output parameter */ int GetOrAllocateSegmentInfo(const char* filename, type_uInt64_t offset, @@ -211,19 +211,19 @@ int GetOrAllocateSegmentInfo(const char* filename, const CUserInfo_t userinfo, CSegmentInfo *segInfo); /** - * @brief lazy 创建clone chunk + * @brief lazy Create clone chunk * @detail - * - location的格式定义为 A@B的形式。 - * 
- 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs + * - The format of 'location' is defined as A@B. + * - If the source data is on S3, the 'location' format is uri@s3, where 'uri' is the actual address of the chunk object. + * - If the source data is on CurveFS, the 'location' format is /filename/chunkindex@cs. * - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn + * @param: location The URL of the data source + * @param: chunkidinfo The target chunk + * @param: sn The sequence number of the chunk + * @param: chunkSize The size of the chunk + * @param: correntSn Used for modifying the 'correctedSn' when creating the clone chunk * - * @return 错误码 + * @return error code */ int CreateCloneChunk(const char* location, const CChunkIDInfo chunkidinfo, @@ -232,20 +232,20 @@ int CreateCloneChunk(const char* location, type_uInt64_t chunkSize); /** - * @brief 实际恢复chunk数据 + * @brief Actual recovery chunk data * - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length * - * @return 错误码 + * @return error code */ int RecoverChunk(const CChunkIDInfo chunkidinfo, type_uInt64_t offset, type_uInt64_t len); /** - * 析构,回收资源 + * Deconstruct and recycle resources */ void UnInit(); diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.0 b/deploy/local/chunkserver/conf/chunkserver.conf.0 index 1525855ebe..b6b0010c83 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.0 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.0 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./0/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -145,7 +145,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./0/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.1 b/deploy/local/chunkserver/conf/chunkserver.conf.1 index d14fa15bb6..62719e0c30 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.1 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.1 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./1/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -143,7 +143,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? 
If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./1/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.2 b/deploy/local/chunkserver/conf/chunkserver.conf.2 index 2604423d6f..edc5750db7 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.2 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.2 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./2/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -142,7 +142,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./2/walfilepool/ diff --git a/include/chunkserver/chunkserver_common.h b/include/chunkserver/chunkserver_common.h index c483dbea82..4a6ba52ae5 100644 --- a/include/chunkserver/chunkserver_common.h +++ b/include/chunkserver/chunkserver_common.h @@ -61,9 +61,9 @@ using SnapshotThrottle = braft::SnapshotThrottle; using ThroughputSnapshotThrottle = braft::ThroughputSnapshotThrottle; -// TODO(lixiaocui): 考虑一下后续的单元测试或者校验要怎么做 +// TODO(lixiaocui): Consider how to proceed with subsequent unit testing or validation /* - * IO性能统计复合metric类型 + * IO performance statistics composite metric type */ struct IoPerfMetric { uint64_t readCount; @@ -77,7 +77,7 @@ struct IoPerfMetric { }; /** - * 将(LogicPoolID, CopysetID)二元组转换成数字格式的复制组ID,格式如下: + * Convert the (LogicPoolID, CopysetID) binary into a copy group ID in numerical format, as follows: * | group id | * | 32 | 32 | * | logic pool id | copyset id | @@ -87,7 +87,7 @@ inline GroupNid ToGroupNid(const LogicPoolID &logicPoolId, return (static_cast(logicPoolId) << 32) | copysetId; } /** - * 将(LogicPoolID, CopysetID)二元组转换成字符串格式的复制组ID + *Convert the (LogicPoolID, CopysetID) binary to a copy group ID in string format */ inline GroupId ToGroupId(const LogicPoolID &logicPoolId, const CopysetID ©setId) { @@ -96,19 +96,19 @@ inline GroupId ToGroupId(const LogicPoolID &logicPoolId, #define ToBraftGroupId ToGroupId /** - * 从数字格式的复制组ID中解析LogicPoolID + *Parsing LogicPoolID from Copy Group ID in Numeric Format */ inline LogicPoolID GetPoolID(const GroupNid &groupId) { return groupId >> 32; } /** - * 从数字格式的复制组ID中解析CopysetID + *Parsing CopysetID from Copy Group ID in Numeric Format */ inline CopysetID GetCopysetID(const GroupNid &groupId) { return groupId & (((uint64_t)1 << 32) - 1); } -/* 格式输出 group id 的 字符串 (logicPoolId, copysetId) */ +/*Format output string for group ID (logicPoolId, copysetId)*/ inline std::string ToGroupIdString(const LogicPoolID &logicPoolId, const CopysetID ©setId) { std::string groupIdString; diff --git a/include/client/libcurve.h b/include/client/libcurve.h index 58459c8bb2..158725391a 100644 --- a/include/client/libcurve.h +++ b/include/client/libcurve.h @@ -57,24 +57,24 @@ typedef struct FileStatInfo { uint32_t blocksize; } FileStatInfo_t; -// 存储用户信息 +// Storing User Information typedef struct C_UserInfo { - // 当前执行的owner信息, owner信息需要以'\0'结尾 + // The current owner information needs to end with'\0' char owner[NAME_MAX_SIZE]; - // 当owner="root"的时候,需要提供password作为计算signature的key - // password信息需要以'\0'结尾 + // When 
owner="root", password needs to be provided as the key for calculating the signature + // password information needs to end with '\0' char password[NAME_MAX_SIZE]; } C_UserInfo_t; typedef struct DirInfo { - // 当前listdir的目录路径 + // The directory path of the current listdir char* dirpath; - // 当前listdir操作的用户信息 + // User information for the current listdir operation C_UserInfo_t* userinfo; - // 当前dir大小,也就是文件数量 + // The current dir size, which is the number of files uint64_t dirSize; - // 当前dir的内的文件信息内容,是一个数组 - // fileStat是这个数组的头,数组大小为dirSize + // The file information content within the current dir is an array + // fileStat is the header of this array, with an array size of dirSize FileStatInfo_t* fileStat; } DirInfo_t; @@ -85,17 +85,17 @@ extern "C" { const char* LibCurveErrorName(LIBCURVE_ERROR err); /** - * 初始化系统 - * @param: path为配置文件路径 - * @return: 成功返回0,否则返回-1. + * Initialize the system + * @param: path is the configuration file path + * @return: Successfully returns 0, otherwise returns -1 */ int Init(const char* path); /** - * 打开文件,qemu打开文件的方式 - * @param: filename文件名, filename中包含用户信息 - * 例如:/1.img_userinfo_ - * @return: 返回文件fd + * Open a file , the way qemu to open a file + * @param: filename File name, which contains user information + * For example:/1.img_userinfo_ + * @return: Return the file fd */ int Open4Qemu(const char* filename); @@ -109,41 +109,41 @@ int Open4Qemu(const char* filename); int IncreaseEpoch(const char* filename); /** - * 打开文件,非qemu场景 - * @param: filename文件名 - * @param: userinfo为要打开的文件的用户信息 - * @return: 返回文件fd + * Open file, non qemu scene + * @param: filename File name + * @param: userinfo is the user information of the file to be opened + * @return: Return the file fd */ int Open(const char* filename, const C_UserInfo_t* userinfo); /** - * 创建文件 - * @param: filename文件名 - * @param: userinfo是当前打开或创建时携带的user信息 - * @param: size文件长度,当create为true的时候以size长度创建文件 - * @return: 成功返回 0, 失败返回小于0,可能有多种可能,比如内部错误,或者文件已存在 + * Create File + * @param: filename File name + * @param: userinfo is the user information that is currently carried when opening or creating + * @param: size file length. 
when create is true, the file is created with length size + @return: returns 0 on success; on failure returns a value less than 0, which may have several causes, such as an internal error or the file already existing */ int Create(const char* filename, const C_UserInfo_t* userinfo, size_t size); /** - * 同步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的偏移 - * @parma:length为待读取的长度 - * @return: 成功返回读取长度, 否则-LIBCURVE_ERROR::FAILED等 + * Synchronous mode read + * @param: fd is the file descriptor returned by the current open + * @param: buf is the buffer to read into + * @param: offset is the offset within the file + * @param: length is the length to be read + * @return: returns the number of bytes read on success, otherwise -LIBCURVE_ERROR::FAILED, etc. */ int Read(int fd, char* buf, off_t offset, size_t length); /** - * 同步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的偏移 - * @parma:length为待读取的长度 - * @return: 成功返回 写入长度,否则-LIBCURVE_ERROR::FAILED等 + * Synchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: buf is the buffer to be written + * @param: offset is the offset within the file + * @param: length is the length to be written + * @return: returns the number of bytes written on success, otherwise -LIBCURVE_ERROR::FAILED, etc. */ int Write(int fd, const char* buf, off_t offset, size_t length); @@ -158,18 +158,18 @@ int Write(int fd, const char* buf, off_t offset, size_t length); int Discard(int fd, off_t offset, size_t length); /** - * 异步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 - * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED + * Asynchronous mode read + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is the async read/write I/O context, which holds the basic I/O information + * @return: returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED */ int AioRead(int fd, CurveAioContext* aioctx); /** - * 异步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 - * @return: 成功返回 0,否则-LIBCURVE_ERROR::FAILED + * Asynchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is the async read/write I/O context, which holds the basic I/O information + * @return: returns 0 on success, otherwise -LIBCURVE_ERROR::FAILED */ int AioWrite(int fd, CurveAioContext* aioctx); @@ -182,51 +182,51 @@ int AioWrite(int fd, CurveAioContext* aioctx); int AioDiscard(int fd, CurveAioContext* aioctx); /** - * 重命名文件 - * @param: userinfo是用户信息 - * @param: oldpath源路径 - * @param: newpath目标路径 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Rename a file + * @param: userinfo is the user information + * @param: oldpath source path + * @param: newpath target path + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Rename(const C_UserInfo_t* userinfo, const char* oldpath, const char* newpath); // NOLINT /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Extend a file + * @param: userinfo is the user information + * @param: filename file name + * @param: newsize the new size + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Extend(const char* filename, const C_UserInfo_t* userinfo, 
uint64_t newsize); // NOLINT /** - * 扩展文件,Qemu场景在线扩容 - * @param: filename文件名 - * @param: newsize新的size - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Extend a file, online expansion in the qemu scenario + * @param: filename file name + * @param: newsize the new size + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Extend4Qemu(const char* filename, int64_t newsize); // NOLINT /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Delete a file + * @param: userinfo is the user information + * @param: filename the file name to be deleted + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Unlink(const char* filename, const C_UserInfo_t* userinfo); /** - * 强制删除文件, unlink删除文件在mds一侧并不是真正的删除, - * 而是放到了垃圾回收站,当使用DeleteForce接口删除的时候是直接删除 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Forcibly delete a file; unlink does not really delete the file on the mds side, + * it only moves it to the recycle bin, while deleting with the DeleteForce interface deletes it directly + * @param: userinfo is the user information + * @param: filename the file name to be deleted + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int DeleteForce(const char* filename, const C_UserInfo_t* userinfo); @@ -242,93 +242,93 @@ int Recover(const char* filename, const C_UserInfo_t* userinfo, uint64_t fileId); /** - * 在获取目录内容之前先打开文件夹 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @return: 成功返回一个非空的DirInfo_t指针,否则返回一个空指针 + * Open the directory before obtaining its contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @return: returns a non-null DirInfo_t pointer on success, otherwise returns a null pointer */ DirInfo_t* OpenDir(const char* dirpath, const C_UserInfo_t* userinfo); /** - * 枚举目录内容, 用户OpenDir成功之后才能list - * @param[in][out]: dirinfo为OpenDir返回的指针, 内部会将mds返回的信息放入次结构中 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Enumerate the directory contents; listing is only possible after OpenDir has succeeded + * @param[in][out]: dirinfo is the pointer returned by OpenDir; the information returned by mds is placed into this structure + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Listdir(DirInfo_t* dirinfo); /** - * 关闭打开的文件夹 - * @param: dirinfo为opendir返回的dir信息 + * Close an opened directory + * @param: dirinfo is the dir information returned by OpenDir */ void CloseDir(DirInfo_t* dirinfo); /** - * 创建目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Create a directory + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Mkdir(const char* dirpath, const C_UserInfo_t* userinfo); /** - * 删除目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @return: 成功返回 0, - * 
否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Delete a directory + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Rmdir(const char* dirpath, const C_UserInfo_t* userinfo); /** - * 获取文件信息 - * @param: filename文件名 - * @param: userinfo是用户信息 - * @param: finfo是出参,携带当前文件的基础信息 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Obtain file information + * @param: filename file name + * @param: userinfo is the user information + * @param: finfo is an output parameter carrying the basic information of the current file + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int StatFile(const char* filename, const C_UserInfo_t* userinfo, FileStatInfo* finfo); /** - * 获取文件信息 - * @param: filename文件名 - * @param: finfo是出参,携带当前文件的基础信息 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Obtain file information + * @param: filename file name + * @param: finfo is an output parameter carrying the basic information of the current file + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int StatFile4Qemu(const char* filename, FileStatInfo* finfo); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Change the owner + * @param: filename the file name whose owner is to be changed + * @param: newOwner the new owner information + * @param: userinfo the user information for this operation; only the root user can change the owner + * @return: returns 0 on success, + * otherwise returns -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int ChangeOwner(const char* filename, const char* newOwner, const C_UserInfo_t* userinfo); /** - * close通过fd找到对应的instance进行删除 - * @param: fd为当前open返回的文件描述符 - * @return: 成功返回 0, - * 否则可能返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Close finds the corresponding instance via fd and removes it + * @param: fd is the file descriptor returned by the current open + * @return: returns 0 on success, + * otherwise may return -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ int Close(int fd); void UnInit(); /** - * @brief: 获取集群id, id用UUID标识 - * @param: buf存放集群id - * @param: buf的长度 - * @return: 成功返回0, 否则返回-LIBCURVE_ERROR::FAILED + * @brief: Obtain the cluster id, which is identified by a UUID + * @param: buf the buffer used to store the cluster id + * @param: the length of buf + * @return: returns 0 on success, otherwise returns -LIBCURVE_ERROR::FAILED */ int GetClusterId(char* buf, int len); @@ -346,11 +346,11 @@ enum class UserDataType { IOBuffer // butil::IOBuf* }; -// 存储用户信息 +// Stores user information typedef struct UserInfo { - // 当前执行的owner信息 + // Owner information of the current operation std::string owner; - // 当owner=root的时候,需要提供password作为计算signature的key + // When owner=root, the password must be provided as the key for computing the signature std::string password; UserInfo() = default; @@ -380,14 +380,14 @@ class CurveClient { virtual ~CurveClient(); /** - * 初始化 - * @param configPath 配置文件路径 - * @return 返回错误码 + * Initialize + * @param configPath configuration file path + * @return returns an error code */ 
virtual int Init(const std::string& configPath); /** - * 反初始化 + * Deinitialize */ virtual void UnInit(); @@ -400,62 +400,62 @@ class CurveClient { virtual int IncreaseEpoch(const std::string& filename); /** - * 打开文件 - * @param filename 文件名,格式为:文件名_用户名_ + * Open a file + * @param filename file name, in the format: filename_username_ * @param[out] sessionId session Id - * @return 成功返回fd,失败返回-1 + * @return returns fd on success, -1 on failure */ virtual int Open(const std::string& filename, const OpenFlags& openflags); /** - * 重新打开文件 - * @param filename 文件名,格式为:文件名_用户名_ + * Reopen a file + * @param filename file name, in the format: filename_username_ * @param sessionId session Id - * @param[out] newSessionId reOpen之后的新sessionId - * @return 成功返回fd,失败返回-1 + * @param[out] newSessionId the new sessionId after reOpen + * @return returns fd on success, -1 on failure */ virtual int ReOpen(const std::string& filename, const OpenFlags& openflags); /** - * 关闭文件 - * @param fd 文件fd - * @return 返回错误码 + * Close a file + * @param fd file fd + * @return returns an error code */ virtual int Close(int fd); /** - * 扩展文件 - * @param filename 文件名,格式为:文件名_用户名_ - * @param newsize 扩展后的大小 - * @return 返回错误码 + * Extend a file + * @param filename file name, in the format: filename_username_ + * @param newsize the size after extension + * @return returns an error code */ virtual int Extend(const std::string& filename, int64_t newsize); /** - * 获取文件大小 - * @param fd 文件fd - * @return 返回错误码 + * Get the file size + * @param fd file fd + * @return returns an error code */ virtual int64_t StatFile(int fd, FileStatInfo* fileStat); /** - * 异步读 - * @param fd 文件fd - * @param aioctx 异步读写的io上下文 + * Asynchronous read + * @param fd file fd + * @param aioctx the async read/write I/O context * @param dataType type of user buffer - * @return 返回错误码 + * @return returns an error code */ virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType); /** - * 异步写 - * @param fd 文件fd - * @param aioctx 异步读写的io上下文 + * Asynchronous write + * @param fd file fd + * @param aioctx the async read/write I/O context * @param dataType type of user buffer - * @return 返回错误码 + * @return returns an error code */ virtual int AioWrite(int fd, CurveAioContext* aioctx, UserDataType dataType); @@ -469,8 +469,8 @@ class CurveClient { virtual int AioDiscard(int fd, CurveAioContext* aioctx); /** - * 测试使用,设置fileclient - * @param client 需要设置的fileclient + * For testing: set the fileclient + * @param client the fileclient to set */ void SetFileClient(FileClient* client); diff --git a/include/etcdclient/etcdclient.h b/include/etcdclient/etcdclient.h index 42f63a7436..ff3f5f094f 100644 --- a/include/etcdclient/etcdclient.h +++ b/include/etcdclient/etcdclient.h @@ -41,7 +41,7 @@ typedef struct { const char *p; ptrdiff_t n; } _GoString_; enum EtcdErrCode { - // grpc errCode, 具体的含义见: + // grpc errCode; for the specific meanings, see: // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace // https://godoc.org/google.golang.org/grpc/codes#Code EtcdOK = 0, @@ -62,7 +62,7 @@ enum EtcdErrCode EtcdDataLoss = 15, EtcdUnauthenticated = 16, - // 自定义错误码 + // Custom error codes EtcdTxnUnkownOp = 17, EtcdObjectNotExist = 18, EtcdErrObjectType = 19, @@ -149,7 +149,7 @@ extern "C" { #endif -// TODO(lixiaocui): 日志打印看是否需要glog +// TODO(lixiaocui): Check whether glog is needed for log printing extern GoUint32 NewEtcdClientV3(struct EtcdConf p0); @@ -182,7 +182,7 @@ struct EtcdClientList_return { GoInt64 r2; }; -// TODO(lixiaocui): list可能需要有长度限制 +// 
TODO(lixiaocui): list may require a length limit extern struct EtcdClientList_return EtcdClientList(int p0, char* p1, char* p2, int p3, int p4); diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf index 71ca380f13..8bc37cb542 100644 --- a/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf +++ b/k8s/nebd/nebd-package/etc/nebd/nebd-client.conf @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress=/var/lib/nebd/nebd.sock -# 文件锁路径 +# File lock path metacache.fileLockPath=/var/lib/nebd/lock -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes=50 -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs=100000 -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs=64000000 -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs=10000 -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS=1 -# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs=100 -# rpc发送执行队列个数 +# Number of RPC send execution queues request.rpcSendExecQueueNum=2 -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS=5 -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs=500 -# 日志路径 +# Log Path log.path=/var/log/nebd/client diff --git a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf index b03e7a25c6..4dcb28c7e6 100644 --- a/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf +++ b/k8s/nebd/nebd-package/etc/nebd/nebd-server.conf @@ -1,14 +1,14 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath=/etc/curve/client.conf -#brpc server监听端口 +# brpc server listening port listen.address=/var/lib/nebd/nebd.sock -#元数据文件地址,包含文件名 +# Metadata file address, including file name meta.file.path=/var/lib/nebd/nebdserver.meta -#心跳超时时间 +# Heartbeat timeout heartbeat.timeout.sec=30 -#文件超时检测时间间隔 +# File timeout detection interval heartbeat.check.interval.ms=3000 \ No newline at end of file diff --git a/mk-deb.sh b/mk-deb.sh index 9a448a6bce..fa8ee36eb6 100755 --- a/mk-deb.sh +++ b/mk-deb.sh @@ -20,7 +20,7 @@ set -o errexit dir=$(pwd) -# step1 清除生成的目录和文件 +# Step1 Clear generated directories and files bazel clean cleandir=( @@ -38,15 +38,15 @@ rm -rf "${cleandir[@]}" git submodule update --init -# step2 获取tag版本和git提交版本信息 -# 获取tag版本 +# Step2 Obtaining Tag Version and Git Submission Version Information +# Get Tag Version tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}') if [ -z ${tag_version} ]; then echo "not found version info, set version to 9.9.9" tag_version=9.9.9 fi -# 获取git提交版本信息 +# Obtain git submission version information commit_id=$(git rev-parse --short HEAD) if [ "$1" = "debug" ]; then debug="+debug" @@ -125,7 +125,7 @@ function build_curvefs_python() { done } -# step3 执行编译 +# Step3 Execute Compilation bazel_version=$(bazel version | grep "Build label" | awk '{print $3}') if [ -z ${bazel_version} ]; then echo "please install bazel 4.2.2 first" @@ -220,7 +220,7 @@ else fi echo "end compile" -#step4 创建临时目录,拷贝二进制、lib库和配置模板 +# Step4 Create a temporary directory, copy binaries, lib libraries, and configuration templates mkdir build cp -r curve-mds build/ cp -r curve-chunkserver build/ @@ -311,7 +311,7 @@ cp -r k8s/nbd/nbd-package 
build/k8s-nbd-package mkdir -p build/k8s-nbd-package/usr/bin cp bazel-bin/nbd/src/curve-nbd build/k8s-nbd-package/usr/bin -# step5 记录到debian包的配置文件,打包debian包 +# Step5 Record the configuration file of the Debian package and package the Debian package version="Version: ${curve_version}" echo ${version} >>build/curve-mds/DEBIAN/control echo ${version} >>build/curve-sdk/DEBIAN/control @@ -337,10 +337,10 @@ dpkg-deb -b build/k8s-nebd-package . dpkg-deb -b build/nbd-package . dpkg-deb -b build/k8s-nbd-package . -# step6 清理libetcdclient.so编译出现的临时文件 +# Step6 Clean up temporary files that appear during libetcdclient.so compilation cd ${dir}/thirdparties/etcdclient make clean cd ${dir} -# step7 打包python wheel +# Step7 Packaging Python Wheel build_curvefs_python $1 diff --git a/mk-tar.sh b/mk-tar.sh index 0bb25540c2..fb5588b98e 100755 --- a/mk-tar.sh +++ b/mk-tar.sh @@ -18,7 +18,7 @@ dir=$(pwd) -# step1 清除生成的目录和文件 +# Step1 Clear generated directories and files bazel clean cleandir=( @@ -36,15 +36,15 @@ rm -rf "${cleandir[@]}" git submodule update --init -# step2 获取tag版本和git提交版本信息 -# 获取tag版本 +# Step2 Obtaining Tag Version and Git Submission Version Information +# Get Tag Version tag_version=$(git status | grep -Ew "HEAD detached at|On branch" | awk '{print $NF}' | awk -F"v" '{print $2}') if [ -z ${tag_version} ]; then echo "not found version info, set version to 9.9.9" tag_version=9.9.9 fi -# 获取git提交版本信息 +# Obtain git submission version information commit_id=$(git rev-parse --short HEAD) if [ "$1" = "debug" ]; then debug="+debug" @@ -123,7 +123,7 @@ function build_curvefs_python() { done } -# step3 执行编译 +# Step3 Execute Compilation bazel_version=$(bazel version | grep "Build label" | awk '{print $3}') if [ -z ${bazel_version} ]; then echo "please install bazel 4.2.2 first" @@ -218,7 +218,7 @@ else fi echo "end compile" -#step4 创建临时目录,拷贝二进制、lib库和配置模板 +# Step4 Create a temporary directory, copy binaries, lib libraries, and configuration templates echo "start copy" mkdir -p build/curve/ # curve-mds @@ -299,7 +299,7 @@ cp nbd/nbd-package/usr/bin/map_curve_disk.sh build/nbd-package/bin cp nbd/nbd-package/etc/curve/curvetab build/nbd-package/etc cp nbd/nbd-package/etc/systemd/system/map_curve_disk.service build/nbd-package/etc -# step5 打包tar包 +# Step5 Packaging tar package echo "start make tarball" cd ${dir}/build curve_name="curve_${curve_version}.tar.gz" @@ -320,14 +320,14 @@ tar zcf ${nbd_name} nbd-package cp ${nbd_name} $dir echo "end make tarball" -# step6 清理libetcdclient.so编译出现的临时文件 +# Step6 Clean up temporary files that appear during libetcdclient.so compilation echo "start clean etcd" cd ${dir}/thirdparties/etcdclient make clean cd ${dir} echo "end clean etcd" -# step7 打包python wheel +# Step7 Packaging python wheel echo "start make python wheel" build_curvefs_python $1 echo "end make python wheel" diff --git a/monitor/grafana-report.py b/monitor/grafana-report.py index a400263e8c..89c517d25f 100644 --- a/monitor/grafana-report.py +++ b/monitor/grafana-report.py @@ -13,7 +13,7 @@ sender = 'Grafana' to_address = ['xxxxxxxxx@163.com'] username = 'xxxxxxxxx@163.com' -password = 'xxxxxxxxx' # SMTP授权码 +password = 'xxxxxxxxx' # SMTP authorization code smtpserver = 'xxxx.163.com:1234' sourcefile= '/etc/curve/monitor/grafana/report/report.tex' imagedir= '/etc/curve/monitor/grafana/report/images/' @@ -60,33 +60,33 @@ def attach_body(msgRoot): html_str = '%s' % (image_body) mailMsg = """ -

可点击如下链接在grafana面板中查看(若显示混乱,请在附件pdf中查看)

-

grafana链接

+

You can click the following link to view it in the Grafana dashboard (if the layout looks garbled, please check the attached PDF)

+

Grafana link

""" % (grafanauri) mailMsg += html_str print(mailMsg) content = MIMEText(mailMsg,'html','utf-8') msgRoot.attach(content) -# 发送dashboard日报邮件 +# Send dashboard daily email def send_mail(): time_now = int(Time.time()) time_local = Time.localtime(time_now) dt = Time.strftime("%Y%m%d",time_local) msgRoot = MIMEMultipart('related') - msgRoot['Subject'] = '%s集群监控日报-%s' % (clustername, dt) + msgRoot['Subject'] = '%sCluster Monitoring Daily Report-%s' % (clustername, dt) msgRoot['From'] = sender - msgRoot['To'] = ",".join( to_address ) # 发给多人 + msgRoot['To'] = ",".join( to_address ) # Send to multiple people - # 添加pdf附件 + # Add PDF attachment pdf_attach = MIMEText(open(pdfpath, 'rb').read(), 'base64', 'utf-8') pdf_attach["Content-Type"] = 'application/octet-stream' - # 这里的filename可以任意写,写什么名字,邮件中显示什么名字 + # The file name here can be written arbitrarily, including the name you want to write and the name displayed in the email pdf_attach["Content-Disposition"] = 'attachment; filename="reporter-{}.pdf"'.format(dt) msgRoot.attach(pdf_attach) - # 添加正文 + # Add Body attach_body(msgRoot) smtp = smtplib.SMTP_SSL(smtpserver) diff --git a/monitor/grafana/dashboards/chunkserver.json b/monitor/grafana/dashboards/chunkserver.json index 2770cd2802..e48e7a0721 100644 --- a/monitor/grafana/dashboards/chunkserver.json +++ b/monitor/grafana/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process running time", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successful requests processed per second for all RPCs on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the read_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the RPC level for the write_chunk operation", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "Percentile values of RPC-level read chunk latency", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read-write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "Number of errors per second for read_chunk at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1392,7 +1392,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "Number of read_chunk operations successfully processed per second at the chunk service layer", "fill": 1, "gridPos": { "h": 7, @@ -1484,7 +1484,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "Number of read_chunk requests received per second at the chunk service 
layer.", "fill": 1, "gridPos": { "h": 7, @@ -1576,7 +1576,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of requests per second that the chunk service level write_chunk returns errors", "fill": 1, "gridPos": { "h": 7, @@ -1668,7 +1668,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1762,7 +1762,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of Write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1854,7 +1854,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -1965,7 +1965,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2076,7 +2076,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2166,7 +2166,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2256,7 +2256,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2346,7 +2346,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2436,7 +2436,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2541,7 +2541,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2642,7 +2642,7 @@ } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, { @@ -2664,7 +2664,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100, @@ -2747,7 +2747,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The 
total number of chunks in the selected copyset", "format": "none", "gauge": { "maxValue": 100, @@ -2828,7 +2828,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6, @@ -2920,7 +2920,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3012,7 +3012,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3104,7 +3104,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3196,7 +3196,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3288,7 +3288,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3380,7 +3380,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3472,7 +3472,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3564,7 +3564,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3656,7 +3656,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3746,7 +3746,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3831,7 +3831,7 @@ } } ], - "title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": 
"chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ } } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/dashboards/client.json b/monitor/grafana/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/dashboards/client.json +++ b/monitor/grafana/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": 
"配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": "row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/dashboards/etcd.json b/monitor/grafana/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/dashboards/etcd.json +++ b/monitor/grafana/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/mds.json b/monitor/grafana/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/dashboards/mds.json +++ b/monitor/grafana/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 
+228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": "Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 
10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/dashboards/report.json b/monitor/grafana/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/dashboards/report.json +++ b/monitor/grafana/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ 
-335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": 
"Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/dashboards/snapshotcloneserver.json b/monitor/grafana/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": { "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/chunkserver.json b/monitor/grafana/provisioning/dashboards/chunkserver.json index 2770cd2802..89ce686aa7 100644 --- a/monitor/grafana/provisioning/dashboards/chunkserver.json +++ b/monitor/grafana/provisioning/dashboards/chunkserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + 
"title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -218,7 +218,7 @@ } } ], - "title": "进程资源占用", + "title": "Process resource usage", "type": "row" }, { @@ -237,7 +237,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver上所有rpc的每秒处理成功的请求个数", + "description": "The number of successfully processed requests per second for all RPCs on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -410,7 +410,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在rpc层面的错误个数", + "description": "The number of errors per second at the rpc level in read_chunk", "fill": 1, "gridPos": { "h": 6, @@ -675,7 +675,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "write_chunk每秒在rpc层面的错误个数", + "description": "Write_chunk The number of errors per second at the rpc level", "fill": 1, "gridPos": { "h": 6, @@ -1027,7 +1027,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "rpc层面read chunk延时的分位值", + "description": "The quantile value of read chunk delay at the rpc level", "fill": 1, "gridPos": { "h": 7, @@ -1281,7 +1281,7 @@ } } ], - "title": "rpc层读写指标", + "title": "RPC layer read and write metrics", "type": "row" }, { @@ -1300,7 +1300,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "read_chunk每秒在chunk service层面的错误个数", + "description": "The number of read_chunk errors per second at the chunk service level", "fill": 1, "gridPos": { "h": 7, @@ -1392,7 +1392,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1484,7 +1484,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的read_chunk请求个数", + "description": "The number of read_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1576,7 +1576,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面write_chunk每秒返回错误的请求个数", + "description": "The number of requests per second that the chunk service level write_chunk returns errors", "fill": 1, "gridPos": { "h": 7, @@ -1668,7 +1668,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -1762,7 +1762,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒接收到的write_chunk请求个数", + "description": "The number of Write_chunk requests received by the chunk service layer per second", "fill": 1, "gridPos": { "h": 7, @@ -1854,7 +1854,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk延时的分位值", + "description": "The percentile value of read chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -1965,7 +1965,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk延时的分位值", + "description": "The percentile value of write chunk delay at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2076,7 +2076,7 @@ "dashLength": 10, "dashes": false, "datasource": 
"Prometheus", - "description": "chunkserver service层面的read chunk的平均延时", + "description": "Average latency of read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2166,7 +2166,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面的write chunk的平均延时", + "description": "Average latency of write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2256,7 +2256,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2346,7 +2346,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2436,7 +2436,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面read chunk的io大小的分位值", + "description": "The quantile value of the IO size of the read chunk at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2541,7 +2541,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面write chunk的io大小的分位值", + "description": "The quantile value of IO size for write chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2642,7 +2642,7 @@ } ], "repeat": null, - "title": "chunkserver层读写指标", + "title": "Chunkserver layer read and write metrics", "type": "row" }, { @@ -2664,7 +2664,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的数量", + "description": "Number of selected copysets", "format": "none", "gauge": { "maxValue": 100, @@ -2747,7 +2747,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "选中的copyset的chunk数量的总和", + "description": "The total number of chunks in the selected copyset", "format": "none", "gauge": { "maxValue": 100, @@ -2828,7 +2828,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "各copyset上已分配的chunk的数量", + "description": "The number of allocated chunks on each copyset", "fill": 1, "gridPos": { "h": 6, @@ -2920,7 +2920,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的read chunk请求个数", + "description": "The number of read chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3012,7 +3012,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3104,7 +3104,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的read chunk请求个数", + "description": "The number of read chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3196,7 +3196,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒出错的write chunk请求个数", + "description": "The number of write chunk requests with errors per second at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3288,7 +3288,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功处理的write 
chunk个数", + "description": "The number of write chunks successfully processed at the copyset level per second", "fill": 1, "gridPos": { "h": 7, @@ -3380,7 +3380,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒接收到的write chunk请求个数", + "description": "The number of write chunk requests received by the copyset layer per second", "fill": 1, "gridPos": { "h": 7, @@ -3472,7 +3472,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3564,7 +3564,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3656,7 +3656,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的read chunk的平均延时", + "description": "Average latency of read chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3746,7 +3746,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "copyset层面的write chunk的平均延时", + "description": "Average latency of write chunks at the copyset level", "fill": 1, "gridPos": { "h": 7, @@ -3831,7 +3831,7 @@ } } ], - "title": "copyset指标", + "title": "Copyset metric", "type": "row" }, { @@ -3850,7 +3850,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -3942,7 +3942,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -4033,7 +4033,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上copyset的数量", + "description": "Number of copysets on chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -4119,7 +4119,7 @@ } } ], - "title": "chunkserver关键指标", + "title": "Chunkserver Key Metrics", "type": "row" }, { @@ -4773,7 +4773,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -4860,7 +4860,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, @@ -5121,7 +5121,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "follower正在下载快照的任务数量(一个copyset最多一个任务)", + "description": "Number of tasks being downloaded by the follower (one copyset can only have one task)", "fill": 1, "gridPos": { "h": 8, @@ -5204,7 +5204,7 @@ } } ], - "title": "Raft关键指标", + "title": "Raft Key Metrics", "type": "row" }, { @@ -5228,7 +5228,7 @@ "rgba(237, 129, 40, 0.89)", "#299c46" ], - "description": "chunkserver上bthread worker的数量", + "description": "Number of bthread workers on chunkserver", "format": "none", "gauge": { "maxValue": 100, @@ -5311,7 +5311,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上正在被使用的工作线程个数", + "description": "The number of worker threads currently in use on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5399,7 +5399,7 @@ "bars": false, "dashLength": 10, "dashes": false, - 
"description": "chunkserver上bthread的数量", + "description": "Number of bthreads on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5487,7 +5487,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "chunkserver上execution queue的数量", + "description": "Number of execution queues on chunkserver", "fill": 1, "gridPos": { "h": 7, @@ -5585,7 +5585,7 @@ } } ], - "title": "线程指标", + "title": "Thread metrics", "type": "row" } ], @@ -5605,7 +5605,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"([[ip:pipe]]):[[port:regex]]\"}", "hide": 2, "includeAll": false, - "label": "实例", + "label": "Instance", "multi": true, "name": "instance", "options": [], @@ -5630,7 +5630,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": true, "name": "ip", "options": [], @@ -5655,7 +5655,7 @@ "definition": "{__name__=~\"rpc_server_.*_curve_chunkserver_chunk_service_write_chunk\", instance=~\"$ip.*\"}", "hide": 0, "includeAll": false, - "label": "端口号", + "label": "Port", "multi": true, "name": "port", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/client.json b/monitor/grafana/provisioning/dashboards/client.json index a7274595c3..6efc67c597 100644 --- a/monitor/grafana/provisioning/dashboards/client.json +++ b/monitor/grafana/provisioning/dashboards/client.json @@ -101,7 +101,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "客户端运行时间", + "title": "Client runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -200,7 +200,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -215,7 +215,7 @@ "panels": [ { "columns": [], - "description": "客户端的配置情况", + "description": "Configuration of the client", "fontSize": "100%", "gridPos": { "h": 8, @@ -235,7 +235,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -251,7 +251,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -267,7 +267,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -309,12 +309,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "客户端配置", + "title": "Client Configuration", "transform": "table", "type": "table" } ], - "title": "客户端配置", + "title": "Client Configuration", "type": "row" }, { @@ -681,7 +681,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -769,7 +769,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1129,7 +1129,7 @@ } } ], - "title": "用户接口层指标", + "title": "User Interface Layer Metrics", "type": "row" }, { @@ -1236,7 +1236,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "fillGradient": 0, "gridPos": { @@ -1323,7 +1323,7 @@ } } ], - "title": "中间业务层指标", + "title": "Intermediate Business Layer Indicators", "type": 
"row" }, { @@ -1982,7 +1982,7 @@ } } ], - "title": "rpc层指标", + "title": "Rpc layer metrics", "type": "row" }, { @@ -2085,7 +2085,7 @@ } } ], - "title": "与MDS通信指标", + "title": "Communication metrics with MDS", "type": "row" } ], @@ -2108,7 +2108,7 @@ "definition": "label_values({__name__=~\"curve_client.*\", instance=~\".*:90.*\"}, instance)", "hide": 0, "includeAll": true, - "label": "客户端", + "label": "Client", "multi": true, "name": "client", "options": [], @@ -2136,7 +2136,7 @@ "definition": "{__name__=~\"curve_client.*write_qps\", instance=~\"$client\"}", "hide": 0, "includeAll": true, - "label": "文件", + "label": "File", "multi": true, "name": "file", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/etcd.json b/monitor/grafana/provisioning/dashboards/etcd.json index 82869aa08a..d1a87934bc 100644 --- a/monitor/grafana/provisioning/dashboards/etcd.json +++ b/monitor/grafana/provisioning/dashboards/etcd.json @@ -2464,7 +2464,7 @@ "definition": "etcd_server_has_leader", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/mds.json b/monitor/grafana/provisioning/dashboards/mds.json index c226cf398d..9704ae6e32 100644 --- a/monitor/grafana/provisioning/dashboards/mds.json +++ b/monitor/grafana/provisioning/dashboards/mds.json @@ -115,7 +115,7 @@ "panels": [ { "columns": [], - "description": "mds的配置", + "description": "Configuration of mds", "fontSize": "100%", "gridPos": { "h": 11, @@ -135,7 +135,7 @@ }, "styles": [ { - "alias": "实例", + "alias": "Instance", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -151,7 +151,7 @@ "unit": "short" }, { - "alias": "配置项", + "alias": "Configuration Item", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -167,7 +167,7 @@ "unit": "short" }, { - "alias": "配置值", + "alias": "Configuration Values", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -208,12 +208,12 @@ ], "timeFrom": null, "timeShift": null, - "title": "mds配置", + "title": "Mds configuration", "transform": "table", "type": "table" } ], - "title": "mds配置", + "title": "Mds configuration", "type": "row" }, { @@ -228,7 +228,7 @@ "panels": [ { "cacheTimeout": null, - "description": "磁盘剩余容量", + "description": "Disk remaining capacity", "gridPos": { "h": 7, "w": 4, @@ -283,7 +283,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配的磁盘容量,物理空间", + "description": "Cluster allocated disk capacity, physical space", "gridPos": { "h": 7, "w": 4, @@ -338,7 +338,7 @@ }, { "cacheTimeout": null, - "description": "集群已分配容量,逻辑空间", + "description": "Cluster allocated capacity, logical space", "gridPos": { "h": 7, "w": 4, @@ -393,7 +393,7 @@ }, { "cacheTimeout": null, - "description": "集群总容量", + "description": "Total Cluster Capacity", "gridPos": { "h": 7, "w": 4, @@ -510,7 +510,7 @@ "type": "gauge" } ], - "title": "集群信息", + "title": "Cluster Information", "type": "row" }, { @@ -523,7 +523,7 @@ }, "id": 22, "panels": [], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -598,7 +598,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", "valueMaps": [ @@ -679,7 +679,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "进程cpu使用情况", + "title": "Process CPU Usage", "tooltip": { "shared": true, "sort": 0, @@ -763,7 +763,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "内存占用", + "title": 
"Memory usage", "tooltip": { "shared": true, "sort": 0, @@ -847,7 +847,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "mds元数据缓存大小", + "title": "Mds metadata cache size", "tooltip": { "shared": true, "sort": 0, @@ -896,7 +896,7 @@ "panels": [ { "columns": [], - "description": "逻辑池监控指标", + "description": "Logical Pool Monitoring Metrics", "fontSize": "100%", "gridPos": { "h": 8, @@ -916,7 +916,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -932,7 +932,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -978,7 +978,7 @@ "type": "table" } ], - "title": "逻辑池状态", + "title": "Logical Pool Status", "type": "row" }, { @@ -1082,7 +1082,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的leader count", + "description": "The current leader count of all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1173,7 +1173,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的copyset数量", + "description": "The current number of copysets for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1264,7 +1264,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的scatterwidth", + "description": "The current scatterwidth of all chunkservers", "fill": 1, "gridPos": { "h": 11, @@ -1355,7 +1355,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求速率", + "description": "Current RPC layer write request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1446,7 +1446,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层写请求iops", + "description": "Current rpc layer write requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1537,7 +1537,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求速率", + "description": "Current RPC layer read request rate for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1628,7 +1628,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "所有chunkserver当前的rpc层读请求iops", + "description": "Current rpc layer read requests iops for all chunkservers", "fill": 1, "gridPos": { "h": 10, @@ -1714,7 +1714,7 @@ } } ], - "title": "chunkserver状态", + "title": "Chunkserver Status", "type": "row" }, { @@ -2233,7 +2233,7 @@ } } ], - "title": "调度监控", + "title": "Scheduling Monitoring", "type": "row" }, { @@ -2251,7 +2251,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "所有rpc请求的qps", + "description": "QPS for all rpc requests", "fill": 1, "gridPos": { "h": 8, @@ -2338,7 +2338,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "当前MDS上所有inflight的rpc请求个数", + "description": "The number of rpc requests for all inflight on the current MDS", "fill": 1, "gridPos": { "h": 8, @@ -2431,7 +2431,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -2519,7 +2519,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -2604,7 +2604,7 @@ } } ], - "title": "RPC层指标", + "title": "RPC 
Layer Metrics", "type": "row" }, { @@ -2622,7 +2622,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "每秒成功处理的heartbeat个数", + "description": "The number of heartbeat successfully processed per second", "fill": 1, "gridPos": { "h": 8, @@ -2709,7 +2709,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat当前inflight的请求个数", + "description": "The current number of inflight requests for heartbeat", "fill": 1, "gridPos": { "h": 8, @@ -2803,7 +2803,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求平均延时", + "description": "Average latency of heartbeat requests", "fill": 1, "gridPos": { "h": 7, @@ -2891,7 +2891,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "heartbeat请求延时分位图", + "description": "Heartbeat Request Delay Bitmap", "fill": 1, "gridPos": { "h": 7, @@ -2974,7 +2974,7 @@ } } ], - "title": "HeartBeat指标", + "title": "HeartBeat metric", "type": "row" } ], @@ -2994,7 +2994,7 @@ "definition": "rpc_server_6666_curve_mds_curve_fsservice_create_file", "hide": 0, "includeAll": true, - "label": "主机", + "label": "Host", "multi": true, "name": "instance", "options": [], diff --git a/monitor/grafana/provisioning/dashboards/report.json b/monitor/grafana/provisioning/dashboards/report.json index 4e26169ddb..f6539e34c4 100644 --- a/monitor/grafana/provisioning/dashboards/report.json +++ b/monitor/grafana/provisioning/dashboards/report.json @@ -224,7 +224,7 @@ }, { "columns": [], - "description": "copyset数量监控指标", + "description": "Copyset quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 8, @@ -244,7 +244,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, "pattern": "__name__", @@ -252,7 +252,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -299,7 +299,7 @@ }, { "columns": [], - "description": "leader数量监控指标", + "description": "Leader quantity monitoring indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -319,7 +319,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -335,7 +335,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -382,7 +382,7 @@ }, { "columns": [], - "description": "scatterwidth指标", + "description": "Scatterwidth indicator", "fontSize": "100%", "gridPos": { "h": 7, @@ -402,7 +402,7 @@ }, "styles": [ { - "alias": "指标", + "alias": "Indicator", "colorMode": null, "dateFormat": "YYYY-MM-DD HH:mm:ss", "decimals": 2, @@ -411,7 +411,7 @@ "unit": "short" }, { - "alias": "值", + "alias": "Value", "colorMode": null, "colors": [ "rgba(245, 54, 54, 0.9)", @@ -470,7 +470,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "MDS各rpc请求的latency", + "description": "Latency of each RPC request in MDS", "fill": 1, "gridPos": { "h": 8, @@ -559,7 +559,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "各请求rpc的qps", + "description": "Qps of each request rpc", "fill": 1, "gridPos": { "h": 8, @@ -834,7 +834,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "client用户接口层统计到的每秒读取字节数", + "description": "The number of bytes read per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1095,7 +1095,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": 
"client用户接口层统计到的每秒写入字节数", + "description": "The number of bytes written per second counted by the client user interface layer", "fill": 1, "gridPos": { "h": 8, @@ -1268,7 +1268,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "每秒get leader的重试rpc次数", + "description": "Number of retry rpc attempts per second to get leader", "fill": 1, "gridPos": { "h": 8, @@ -1444,7 +1444,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "client单位大小的平均latency", + "description": "Average latency of client unit size", "fill": 1, "gridPos": { "h": 8, @@ -1747,7 +1747,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkfilepool中剩余的chunk的数量", + "description": "Number of remaining chunks in chunkfilepool", "fill": 1, "gridPos": { "h": 8, @@ -1839,7 +1839,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "chunkserver上所有已分配的chunk的数量", + "description": "The number of all allocated chunks on the chunkserver", "fill": 1, "gridPos": { "h": 8, @@ -1938,7 +1938,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的write chunk个数", + "description": "The number of write chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2032,7 +2032,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功write chunk的字节数", + "description": "The number of bytes per second successfully written chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2209,7 +2209,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunk service层面每秒成功处理的read chunk个数", + "description": "The number of read chunks successfully processed at the chunk service level per second", "fill": 1, "gridPos": { "h": 7, @@ -2301,7 +2301,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver service层面每秒成功read chunk的字节数", + "description": "The number of bytes per second that successfully read chunks at the chunkserver service level", "fill": 1, "gridPos": { "h": 7, @@ -2478,7 +2478,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver read chunk 单位大小内的平均延时", + "description": "Average latency per chunkserver read chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -2572,7 +2572,7 @@ "dashLength": 10, "dashes": false, "datasource": "Prometheus", - "description": "chunkserver write chunk 单位大小内的平均延时", + "description": "Average latency in chunkserver write chunk unit size", "fill": 1, "gridPos": { "h": 7, @@ -3023,7 +3023,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "raft日志落盘的延时", + "description": "Delay in the fall of raft logs", "fill": 1, "gridPos": { "h": 8, @@ -3203,7 +3203,7 @@ "bars": false, "dashLength": 10, "dashes": false, - "description": "安装快照的数据流量", + "description": "Data traffic for installing snapshots", "fill": 1, "gridPos": { "h": 8, diff --git a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json index 3382ca3c84..7eaab10890 100644 --- a/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json +++ b/monitor/grafana/provisioning/dashboards/snapshotcloneserver.json @@ -100,7 +100,7 @@ "thresholds": "1,2", "timeFrom": null, "timeShift": null, - "title": "进程运行时间", + "title": "Process runtime", "type": "singlestat", "valueFontSize": "80%", 
"valueMaps": [ @@ -161,7 +161,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "cpu使用率", + "title": "CPU usage rate", "tooltip": { "shared": true, "sort": 0, @@ -199,7 +199,7 @@ } } ], - "title": "进程资源", + "title": "Process Resources", "type": "row" }, { @@ -212,12 +212,12 @@ }, "id": 12, "panels": [], - "title": "任务信息", + "title": "Task Information", "type": "row" }, { "columns": [], - "description": "当前快照任务的信息", + "description": "Information about the current snapshot task", "fontSize": "100%", "gridPos": { "h": 9, @@ -470,13 +470,13 @@ ], "timeFrom": null, "timeShift": null, - "title": "快照任务表", + "title": "Snapshot Task Table", "transform": "table", "type": "table" }, { "columns": [], - "description": "当前克隆任务的信息", + "description": "Information about the current cloning task", "fontSize": "100%", "gridPos": { "h": 9, @@ -800,7 +800,7 @@ ], "timeFrom": null, "timeShift": null, - "title": "克隆任务表", + "title": "Clone Task Table", "transform": "table", "type": "table" }, @@ -810,7 +810,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -856,7 +856,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "快照数量统计", + "title": "Number of Snapshots Statistics", "tooltip": { "shared": true, "sort": 0, @@ -901,7 +901,7 @@ "dashLength": 10, "dashes": false, "decimals": 0, - "description": "快照数量统计", + "description": "Number of Snapshots Statistics", "fill": 1, "gridPos": { "h": 8, @@ -947,7 +947,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "克隆数量统计", + "title": "Clone Count Statistics", "tooltip": { "shared": true, "sort": 0, @@ -1002,7 +1002,7 @@ "definition": "{__name__=~\"snapshotcloneserver_.*\"}", "hide": 0, "includeAll": false, - "label": "主机", + "label": "Host", "multi": false, "name": "instance", "options": [], diff --git a/nebd/etc/nebd/nebd-client.conf b/nebd/etc/nebd/nebd-client.conf index 1207e5bbd0..6baa9c2a51 100644 --- a/nebd/etc/nebd/nebd-client.conf +++ b/nebd/etc/nebd/nebd-client.conf @@ -1,28 +1,28 @@ # part2 socket file address nebdserver.serverAddress=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -# 文件锁路径 +# File lock path metacache.fileLockPath=/data/nebd/lock # __CURVEADM_TEMPLATE__ ${prefix}/data/lock __CURVEADM_TEMPLATE__ -# 同步rpc的最大重试次数 +# Maximum number of retries for synchronous rpc request.syncRpcMaxRetryTimes=50 -# rpc请求的重试间隔 +# The retry interval for rpc requests request.rpcRetryIntervalUs=100000 -# rpc请求的最大重试间隔 +# Maximum retry interval for rpc requests request.rpcRetryMaxIntervalUs=64000000 -# rpc hostdown情况下的重试时间 +# The retry time in the case of rpc hostdown request.rpcHostDownRetryIntervalUs=10000 -# brpc的健康检查周期时间,单位s +# The health check cycle time of brpc, in seconds request.rpcHealthCheckIntervalS=1 -# brpc从rpc失败到进行健康检查的最大时间间隔,单位ms +# The maximum time interval from rpc failure to health check in ms for brpc request.rpcMaxDelayHealthCheckIntervalMs=100 -# rpc发送执行队列个数 +# Number of RPC send execution queues request.rpcSendExecQueueNum=2 -# heartbeat间隔 +# heartbeat interval heartbeat.intervalS=5 -# heartbeat rpc超时时间 +# heartbeat RPC timeout heartbeat.rpcTimeoutMs=500 -# 日志路径 +# Log Path log.path=/data/log/nebd/client # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__ diff --git a/nebd/etc/nebd/nebd-server.conf b/nebd/etc/nebd/nebd-server.conf index a6d2fbe534..1ef0966cc6 100644 --- a/nebd/etc/nebd/nebd-server.conf +++ 
b/nebd/etc/nebd/nebd-server.conf @@ -1,16 +1,16 @@ -# curve-client配置文件地址 +# curve-client configuration file address curveclient.confPath=/etc/curve/client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/client.conf __CURVEADM_TEMPLATE__ -#brpc server监听端口 +# brpc server listening port listen.address=/data/nebd/nebd.sock # __CURVEADM_TEMPLATE__ ${prefix}/data/nebd.sock __CURVEADM_TEMPLATE__ -#元数据文件地址,包含文件名 +# Metadata file address, including file name meta.file.path=/data/nebd/nebdserver.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/nebdserver.meta __CURVEADM_TEMPLATE__ -#心跳超时时间 +# Heartbeat timeout heartbeat.timeout.sec=30 -#文件超时检测时间间隔 +# File timeout detection interval heartbeat.check.interval.ms=3000 # return rpc when io error diff --git a/nebd/nebd-package/usr/bin/nebd-daemon b/nebd/nebd-package/usr/bin/nebd-daemon index 51dabbcf1d..e4f8bdc5c6 100755 --- a/nebd/nebd-package/usr/bin/nebd-daemon +++ b/nebd/nebd-package/usr/bin/nebd-daemon @@ -133,7 +133,7 @@ function stop_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -174,7 +174,7 @@ function restart_one() { return fi - # 判断是否已经通过daemon启动了nebd-server + # Determine if nebd-server has been started through daemon daemon --name ${DAEMON_NAME} --pidfile ${PID_FILE} --running if [ $? -ne 0 ]; then echo "$1: didn't start nebd-server by daemon" @@ -262,7 +262,7 @@ function status() { done } -# 使用方式 +# Usage function usage() { echo "Usage:" echo " nebd-daemon start -- start deamon process and watch on nebd-server process for all instance" @@ -278,7 +278,7 @@ function usage() { echo " nebd-daemon status-one -- show if the nebd-server is running by daemon for current user's instance" } -# 检查参数启动参数,最少1个 +# Check parameter startup parameters, at least 1 if [ $# -lt 1 ]; then usage exit diff --git a/nebd/src/common/configuration.cpp b/nebd/src/common/configuration.cpp index 51ff19fcca..ce8606816b 100644 --- a/nebd/src/common/configuration.cpp +++ b/nebd/src/common/configuration.cpp @@ -55,8 +55,8 @@ bool Configuration::LoadConfig() { } bool Configuration::SaveConfig() { - // 当前先只保存配置,原文件的注释等内容先忽略 - // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值 + // Currently, only the configuration is saved, and the comments and other contents of the original file are ignored + // TODO(yyk): In the future, consider changing to the original file format without changing, only modifying the configuration values std::ofstream wStream(confFile_); if (wStream.is_open()) { for (auto& pair : config_) { diff --git a/nebd/src/common/configuration.h b/nebd/src/common/configuration.h index 95df251e80..7553712e72 100644 --- a/nebd/src/common/configuration.h +++ b/nebd/src/common/configuration.h @@ -44,24 +44,24 @@ class Configuration { std::string GetStringValue(const std::string &key); /* - * @brief GetStringValue 获取指定配置项的值 + * @brief GetStringValue Get the value of the specified configuration item * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetStringValue(const std::string &key, std::string *out); void SetStringValue(const std::string &key, const std::string &value); int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT + * 
@brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the specified configuration item//NOLINT * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] outThe value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetIntValue(const std::string &key, int *out); bool GetUInt32Value(const std::string &key, uint32_t *out); @@ -71,36 +71,36 @@ class Configuration { double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 + * @brief GetDoubleValue Get the value of the specified configuration item * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetDoubleValue(const std::string &key, double *out); void SetDoubleValue(const std::string &key, const double value); double GetFloatValue(const std::string &key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 + * @brief GetFloatValue Get the value of the specified configuration item * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetFloatValue(const std::string &key, float *out); void SetFloatValue(const std::string &key, const float value); bool GetBoolValue(const std::string &key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 + * @brief GetBoolValue Get the value of the specified configuration item * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetBoolValue(const std::string &key, bool *out); void SetBoolValue(const std::string &key, const bool value); diff --git a/nebd/src/common/crc32.h b/nebd/src/common/crc32.h index 627218fcbd..2e36d65997 100644 --- a/nebd/src/common/crc32.h +++ b/nebd/src/common/crc32.h @@ -32,23 +32,23 @@ namespace nebd { namespace common { /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on the crc32 library of brpc + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ inline uint32_t CRC32(const char *pData, size_t iLen) { return butil::crc32c::Value(pData, iLen); } /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装. 此函数支持继承式 - * 计算,以支持对SGL类型的数据计算单个CRC校验码。满足如下约束: + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on the crc32 library of brpc This function supports inheritance + * Calculate to support the calculation of a single CRC checksum for SGL type data. 
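(Editor's illustration, not part of the patch.) The configuration.h hunk above documents the getter family of the Configuration class, and the nebd-client.conf hunk earlier lists the keys it would read. A minimal usage sketch follows; it assumes the class lives in nebd::common like the other common helpers, and it uses a hypothetical SetConfigPath() setter because only LoadConfig() and the getters are visible in this hunk.

#include <iostream>
#include <string>

#include "nebd/src/common/configuration.h"

int main() {
    nebd::common::Configuration conf;
    // Hypothetical setter: only LoadConfig() and the getters appear in the hunk above.
    conf.SetConfigPath("/etc/nebd/nebd-client.conf");
    if (!conf.LoadConfig()) {
        std::cerr << "failed to load nebd-client.conf" << std::endl;
        return -1;
    }

    std::string serverAddress;
    int syncRetryTimes = 0;
    // Each getter returns true only when the key was found, per the translated comments.
    if (conf.GetStringValue("nebdserver.serverAddress", &serverAddress) &&
        conf.GetIntValue("request.syncRpcMaxRetryTimes", &syncRetryTimes)) {
        std::cout << "server address: " << serverAddress
                  << ", sync rpc max retry times: " << syncRetryTimes << std::endl;
    }
    return 0;
}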
Meet the following constraints: * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5) - * @param crc 起始的crc校验码 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * @param crc starting crc checksum + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) { return butil::crc32c::Extend(crc, pData, iLen); diff --git a/nebd/src/common/file_lock.h b/nebd/src/common/file_lock.h index 277cfebcf7..fec78ed4bc 100644 --- a/nebd/src/common/file_lock.h +++ b/nebd/src/common/file_lock.h @@ -28,7 +28,7 @@ namespace nebd { namespace common { -// 文件锁 +// File lock class FileLock { public: explicit FileLock(const std::string& fileName) @@ -38,21 +38,21 @@ class FileLock { ~FileLock() = default; /** - * @brief 获取文件锁 - * @return 成功返回0,失败返回-1 + * @brief Get file lock + * @return returns 0 for success, -1 for failure */ int AcquireFileLock(); /** - * @brief 释放文件锁 + * @brief Release file lock */ void ReleaseFileLock(); private: - // 锁文件的文件名 + // Lock the file name of the file std::string fileName_; - // 锁文件的fd + // Lock file fd int fd_; }; diff --git a/nebd/src/common/name_lock.h b/nebd/src/common/name_lock.h index ae34c182a9..6883902ec0 100644 --- a/nebd/src/common/name_lock.h +++ b/nebd/src/common/name_lock.h @@ -40,26 +40,26 @@ class NameLock : public Uncopyable { explicit NameLock(int bucketNum = 256); /** - * @brief 对指定string加锁 + * @brief locks the specified string * - * @param lockStr 被加锁的string + * @param lockStr locked string */ void Lock(const std::string &lockStr); /** - * @brief 尝试指定sting加锁 + * @brief Attempt to specify sting lock * - * @param lockStr 被加锁的string + * @param lockStr locked string * - * @retval 成功 - * @retval 失败 + * @retval succeeded + * @retval failed */ bool TryLock(const std::string &lockStr); /** - * @brief 对指定string解锁 + * @brief unlocks the specified string * - * @param lockStr 被加锁的string + * @param lockStr locked string */ void Unlock(const std::string &lockStr); diff --git a/nebd/src/common/stringstatus.h b/nebd/src/common/stringstatus.h index fc4c9a6364..bbdc0340c6 100644 --- a/nebd/src/common/stringstatus.h +++ b/nebd/src/common/stringstatus.h @@ -33,15 +33,15 @@ namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs is used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix, prefix + * @param[in] name, first name */ void ExposeAs(const std::string &prefix, const std::string &name); /** - * @brief Set 设置每项key-value信息 + * @brief Set sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,26 +49,26 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update sets the key-value pairs in the current key value map to status as JSON strings// NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key Specify the key */ std::string GetValueByKey(const std::string &key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody obtains the JSON format string corresponding to the current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported 
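(Editor's illustration, not part of the patch.) The crc32.h hunk above documents the incremental form of the checksum and states the constraint CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5). A minimal sketch exercising exactly that property, assuming brpc's butil and the nebd headers are available:

#include <cassert>
#include <cstdint>

#include "nebd/src/common/crc32.h"

int main() {
    // One-shot checksum over the whole buffer.
    uint32_t whole = nebd::common::CRC32("hello world", 11);
    // Piecewise checksum: the first slice's result seeds the second call,
    // which is the constraint spelled out in the comment above.
    uint32_t piecewise =
        nebd::common::CRC32(nebd::common::CRC32("hello ", 6), "world", 5);
    assert(whole == piecewise);
    return 0;
}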
std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common diff --git a/nebd/src/common/timeutility.h b/nebd/src/common/timeutility.h index a80afb61b5..5643523e28 100644 --- a/nebd/src/common/timeutility.h +++ b/nebd/src/common/timeutility.h @@ -53,7 +53,7 @@ class TimeUtility { return tm.tv_sec; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; diff --git a/nebd/src/part1/async_request_closure.cpp b/nebd/src/part1/async_request_closure.cpp index 94d1a9f50f..cb99ef3a0f 100644 --- a/nebd/src/part1/async_request_closure.cpp +++ b/nebd/src/part1/async_request_closure.cpp @@ -52,7 +52,7 @@ void AsyncRequestClosure::Run() { if (nebd::client::RetCode::kOK == retCode) { DVLOG(6) << OpTypeToString(aioCtx->op) << " success, fd = " << fd; - // 读请求复制数据 + // Read Request Copy Data if (aioCtx->op == LIBAIO_OP::LIBAIO_OP_READ) { cntl.response_attachment().copy_to( aioCtx->buf, cntl.response_attachment().size()); @@ -73,8 +73,8 @@ void AsyncRequestClosure::Run() { } int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { - // EHOSTDOWN: 找不到可用的server。 - // server可能停止服务了,也可能正在退出中(返回了ELOGOFF) + // EHOSTDOWN: Unable to find an available server. + // The server may have stopped serving or may be exiting (returning ELOGOFF) if (cntl.ErrorCode() == EHOSTDOWN) { return requestOption_.rpcHostDownRetryIntervalUs; } diff --git a/nebd/src/part1/async_request_closure.h b/nebd/src/part1/async_request_closure.h index 27ab7f613d..f074b45f49 100644 --- a/nebd/src/part1/async_request_closure.h +++ b/nebd/src/part1/async_request_closure.h @@ -47,13 +47,13 @@ struct AsyncRequestClosure : public google::protobuf::Closure { void Retry() const; - // 请求fd + // Request fd int fd; - // 请求上下文信息 + // Request Context Information NebdClientAioContext* aioCtx; - // brpc请求的controller + // Controller requested by brpc brpc::Controller cntl; RequestOption requestOption_; diff --git a/nebd/src/part1/heartbeat_manager.h b/nebd/src/part1/heartbeat_manager.h index 13289cb2d0..ae5ebb73b3 100644 --- a/nebd/src/part1/heartbeat_manager.h +++ b/nebd/src/part1/heartbeat_manager.h @@ -36,8 +36,8 @@ namespace nebd { namespace client { -// Heartbeat 管理类 -// 定期向nebd-server发送已打开文件的心跳信息 +// Heartbeat Management Class +// Regularly send heartbeat information of opened files to nebd-server class HeartbeatManager { public: explicit HeartbeatManager(std::shared_ptr metaCache); @@ -47,30 +47,30 @@ class HeartbeatManager { } /** - * @brief: 启动心跳线程 + * @brief: Start heartbeat thread */ void Run(); /** - * @brief: 停止心跳线程 + * @brief: Stop heartbeat thread */ void Stop(); /** - * @brief 初始化 - * @param heartbeatOption heartbeat 配置项 - * @return 0 初始化成功 / -1 初始化失败 + * @brief initialization + * @param heartbeatOption heartbeat configuration item + * @return 0 initialization successful/-1 initialization failed */ int Init(const HeartbeatOption& option); private: /** - * @brief: 心跳线程执行函数,定期发送心跳消息 + * @brief: Heartbeat thread execution function, sending heartbeat messages regularly */ void HeartBetaThreadFunc(); /** - * @brief: 向part2发送心跳消息,包括当前已打开的卷信息 + * @brief: Send a heartbeat message to part2, including information about the currently opened volume */ void SendHeartBeat(); diff --git a/nebd/src/part1/libnebd.cpp b/nebd/src/part1/libnebd.cpp index ab6093e415..425f5554e3 100644 --- 
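(Editor's illustration, not part of the patch.) The file_lock.h and name_lock.h hunks above document AcquireFileLock()/ReleaseFileLock() and Lock()/TryLock()/Unlock(). A hedged sketch of how the two primitives might be combined, assuming both classes live in nebd::common; the lock-file path and volume name are made up for the example:

#include <iostream>
#include <string>

#include "nebd/src/common/file_lock.h"
#include "nebd/src/common/name_lock.h"

int main() {
    // Per-name exclusion inside one process: serialize work on the same volume name.
    nebd::common::NameLock nameLock;          // default 256 buckets, per the header above
    const std::string volume = "example_volume";
    nameLock.Lock(volume);
    // ... operate on the volume ...
    nameLock.Unlock(volume);

    // Cross-process exclusion through a lock file.
    nebd::common::FileLock fileLock("/tmp/nebd_example.lock");  // made-up path
    if (fileLock.AcquireFileLock() != 0) {    // 0 on success, -1 on failure per the comment
        std::cerr << "failed to acquire file lock" << std::endl;
        return -1;
    }
    // ... critical section shared across processes ...
    fileLock.ReleaseFileLock();
    return 0;
}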
a/nebd/src/part1/libnebd.cpp +++ b/nebd/src/part1/libnebd.cpp @@ -26,7 +26,7 @@ extern "C" { bool g_inited = false; -// Note: 配置文件路径是否有上层传下来比较合适,评估是否要修改 +// Note: It is more appropriate to pass down the configuration file path from the upper level, and evaluate whether it needs to be modified const char* confpath = "/etc/nebd/nebd-client.conf"; int nebd_lib_init() { if (g_inited) { diff --git a/nebd/src/part1/libnebd.h b/nebd/src/part1/libnebd.h index 380776d71b..232157acda 100644 --- a/nebd/src/part1/libnebd.h +++ b/nebd/src/part1/libnebd.h @@ -36,10 +36,10 @@ extern "C" { #include #include -// 文件路径最大的长度,单位字节 +// The maximum length of the file path, in bytes #define NEBD_MAX_FILE_PATH_LEN 1024 -// nebd异步请求的类型 +// Types of nebd asynchronous requests typedef enum LIBAIO_OP { LIBAIO_OP_READ, LIBAIO_OP_WRITE, @@ -55,139 +55,139 @@ void nebd_lib_init_open_flags(NebdOpenFlags* flags); struct NebdClientAioContext; -// nebd回调函数的类型 +// The type of nebd callback function typedef void (*LibAioCallBack)(struct NebdClientAioContext* context); struct NebdClientAioContext { - off_t offset; // 请求的offset - size_t length; // 请求的length - int ret; // 记录异步返回的返回值 - LIBAIO_OP op; // 异步请求的类型,详见定义 - LibAioCallBack cb; // 异步请求的回调函数 - void* buf; // 请求的buf - unsigned int retryCount; // 记录异步请求的重试次数 + off_t offset; // Requested offset + size_t length; // Requested length + int ret; // Record the return value returned asynchronously + LIBAIO_OP op; // The type of asynchronous request, as defined in the definition + LibAioCallBack cb; // Callback function for asynchronous requests + void* buf; // Buf requested + unsigned int retryCount; // Record the number of retries for asynchronous requests }; // int nebd_lib_fini(void); /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the first call + * @param none + * @return returns 0 for success, -1 for failure */ int nebd_lib_init(void); int nebd_lib_init_with_conf(const char* confPath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ int nebd_lib_uninit(void); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int nebd_lib_open(const char* filename); int nebd_lib_open_with_flags(const char* filename, const NebdOpenFlags* openflags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_close(int fd); /** - * @brief 同步读文件 - * @param fd:文件的fd - * buf:存放读取data的buf - * offset:读取的位置offset - * length:读取的长度 - * @return 成功返回0,失败返回错误码 + * @brief Synchronize file reading + * @param fd: fd of the file + * buf: Store and read data buf + * offset: The position read offset + * length: The length read + * @return success returns 0, failure returns error code */ int nebd_lib_pread(int fd, void* buf, off_t offset, size_t length); /** - * @brief 同步写文件 - * @param fd:文件的fd - * buf:存放写入data的buf - * offset:写入的位置offset - * length:写入的长度 - * @return 成功返回0,失败返回错误码 + * @brief Synchronize file writing + * @param fd: fd of the file + * buf: Store and read data buf + * offset: The position read offset + * length: The length read + * @return success returns 0, failure returns error code */ int 
nebd_lib_pwrite(int fd, const void* buf, off_t offset, size_t length); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_discard(int fd, struct NebdClientAioContext* context); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_aio_pread(int fd, struct NebdClientAioContext* context); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_aio_pwrite(int fd, struct NebdClientAioContext* context); /** - * @brief sync文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief sync file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_sync(int fd); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but failed with an error code */ int64_t nebd_lib_filesize(int fd); int64_t nebd_lib_blocksize(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + * size: adjusted file size + * @return success returns 0, failure returns error code */ int nebd_lib_resize(int fd, int64_t size); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int nebd_lib_flush(int fd, struct NebdClientAioContext* context); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get file information + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an error code */ int64_t nebd_lib_getinfo(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int nebd_lib_invalidcache(int fd); diff --git a/nebd/src/part1/libnebd_file.h b/nebd/src/part1/libnebd_file.h index 6361094ab2..f990d86f1a 100644 --- a/nebd/src/part1/libnebd_file.h +++ b/nebd/src/part1/libnebd_file.h @@ -26,83 +26,83 @@ #include "nebd/src/part1/libnebd.h" /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the first call + * @param none + * @return 
returns 0 for success, -1 for failure */ int Init4Nebd(const char* confpath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ void Uninit4Nebd(); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int Open4Nebd(const char* filename, const NebdOpenFlags* flags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int Close4Nebd(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + * size: adjusted file size + * @return success returns 0, failure returns error code */ int Extend4Nebd(int fd, int64_t newsize); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but failed with an error code */ int64_t GetFileSize4Nebd(int fd); int64_t GetBlockSize4Nebd(int fd); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int Discard4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioRead4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioWrite4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int Flush4Nebd(int fd, NebdClientAioContext* aioctx); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get info of the file + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an error code */ int64_t GetInfo4Nebd(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int InvalidCache4Nebd(int fd); diff --git a/nebd/src/part1/nebd_client.cpp 
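(Editor's illustration, not part of the patch.) The libnebd.h and libnebd_file.h hunks above translate the documentation of the part1 C interface. The sketch below strings a few of those calls together, following only the return conventions stated in the comments; the volume name is hypothetical and real code would wait for the asynchronous callback before closing the file.

#include <cstdio>
#include <cstdlib>

#include "nebd/src/part1/libnebd.h"

// Completion callback of the LibAioCallBack type documented above.
static void OnAioDone(struct NebdClientAioContext* ctx) {
    std::printf("async request finished, ret = %d\n", ctx->ret);
    std::free(ctx->buf);
    delete ctx;
}

int main() {
    if (nebd_lib_init() != 0) {                // 0 on success, -1 on failure
        return -1;
    }
    int fd = nebd_lib_open("example_volume");  // hypothetical volume name
    if (fd < 0) {                              // negative value is an error code
        nebd_lib_uninit();
        return -1;
    }

    char buf[4096] = {};
    if (nebd_lib_pread(fd, buf, 0, sizeof(buf)) != 0) {   // synchronous read
        std::printf("pread failed\n");
    }

    // Asynchronous read: fill the context documented above and hand it to the library.
    auto* ctx = new NebdClientAioContext();
    ctx->offset = 0;
    ctx->length = sizeof(buf);
    ctx->buf = std::malloc(sizeof(buf));
    ctx->op = LIBAIO_OP_READ;
    ctx->cb = OnAioDone;
    ctx->retryCount = 0;
    if (nebd_lib_aio_pread(fd, ctx) != 0) {    // on failure the callback will not fire
        std::free(ctx->buf);
        delete ctx;
    }

    // ... a real program waits for OnAioDone here ...
    nebd_lib_close(fd);
    nebd_lib_uninit();
    return 0;
}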
b/nebd/src/part1/nebd_client.cpp index bd1a2202ea..2d1caf1116 100644 --- a/nebd/src/part1/nebd_client.cpp +++ b/nebd/src/part1/nebd_client.cpp @@ -36,13 +36,13 @@ #define RETURN_IF_FALSE(val) if (val == false) { return -1; } -// 修改brpc的health_check_interval参数,这个参数用来控制健康检查的周期 -// ## 健康检查 -// 连接断开的server会被暂时隔离而不会被负载均衡算法选中,brpc会定期连接被隔离的server,以检查他们是否恢复正常,间隔由参数-health_check_interval控制: // NOLINT +// Modify health_check_interval parameter is used to control the period of health checks +// ## Health Check +// The disconnected servers will be temporarily isolated and not selected by the load balancing algorithm. brpc will periodically connect to the isolated servers to check if they have returned to normal. The interval is determined by the parameter-health_check_interval://NOLINT // | Name | Value | Description | Defined At | // NOLINT // | ------------------------- | ----- | ---------------------------------------- | ----------------------- | // NOLINT // | health_check_interval (R) | 3 | seconds between consecutive health-checkings | src/brpc/socket_map.cpp | // NOLINT -// 一旦server被连接上,它会恢复为可用状态。如果在隔离过程中,server从命名服务中删除了,brpc也会停止连接尝试。 // NOLINT +// Once the server is connected, it will return to an available state. If the server is removed from the naming service during the isolation process, brpc will also stop connection attempts// NOLINT namespace brpc { DECLARE_int32(health_check_interval); DECLARE_int32(circuit_breaker_max_isolation_duration_ms); @@ -139,7 +139,7 @@ void NebdClient::Uninit() { } int NebdClient::Open(const char* filename, const NebdOpenFlags* flags) { - // 加文件锁 + // Add file lock std::string fileLockName = option_.fileLockPath + "/" + ReplaceSlash(filename); FileLock fileLock(fileLockName); diff --git a/nebd/src/part1/nebd_client.h b/nebd/src/part1/nebd_client.h index c814f9f711..adf2262c6c 100644 --- a/nebd/src/part1/nebd_client.h +++ b/nebd/src/part1/nebd_client.h @@ -58,93 +58,93 @@ class NebdClient { ~NebdClient() = default; /** - * @brief 初始化nebd,仅在第一次调用的时候真正执行初始化逻辑 - * @param none - * @return 成功返回0,失败返回-1 + * @brief initializes nebd and only executes the initialization logic on the first call + * @param none + * @return returns 0 for success, -1 for failure */ int Init(const char* confpath); /** - * @brief 反初始化nebd - * @param none - * @return 成功返回0,失败返回-1 + * @brief uninitialize nebd + * @param none + * @return returns 0 for success, -1 for failure */ void Uninit(); /** - * @brief open文件 - * @param filename:文件名 - * @return 成功返回文件fd,失败返回错误码 + * @brief open file + * @param filename: File name + * @return successfully returned the file fd, but failed with an error code */ int Open(const char* filename, const NebdOpenFlags* flags); /** - * @brief close文件 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief close file + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int Close(int fd); /** - * @brief resize文件 - * @param fd:文件的fd - * size:调整后的文件size - * @return 成功返回0,失败返回错误码 + * @brief resize file + * @param fd: fd of the file + *Size: adjusted file size + * @return success returns 0, failure returns error code */ int Extend(int fd, int64_t newsize); /** - * @brief 获取文件size - * @param fd:文件的fd - * @return 成功返回文件size,失败返回错误码 + * @brief Get file size + * @param fd: fd of the file + * @return successfully returned the file size, but failed with an error code */ int64_t GetFileSize(int fd); int64_t GetBlockSize(int fd); /** - * @brief discard文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * 
@brief discard file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int Discard(int fd, NebdClientAioContext* aioctx); /** - * @brief 读文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief Read file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioRead(int fd, NebdClientAioContext* aioctx); /** - * @brief 写文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief write file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int AioWrite(int fd, NebdClientAioContext* aioctx); /** - * @brief flush文件,异步函数 - * @param fd:文件的fd - * context:异步请求的上下文,包含请求所需的信息以及回调 - * @return 成功返回0,失败返回错误码 + * @brief flush file, asynchronous function + * @param fd: fd of the file + * context: The context of an asynchronous request, including the information required for the request and the callback + * @return success returns 0, failure returns error code */ int Flush(int fd, NebdClientAioContext* aioctx); /** - * @brief 获取文件info - * @param fd:文件的fd - * @return 成功返回文件对象size,失败返回错误码 + * @brief Get file information + * @param fd: fd of the file + * @return successfully returned the file object size, but failed with an error code */ int64_t GetInfo(int fd); /** - * @brief 刷新cache,等所有异步请求返回 - * @param fd:文件的fd - * @return 成功返回0,失败返回错误码 + * @brief refresh cache, wait for all asynchronous requests to return + * @param fd: fd of the file + * @return success returns 0, failure returns error code */ int InvalidCache(int fd); @@ -159,17 +159,17 @@ class NebdClient { void InitLogger(const LogOption& logOption); /** - * @brief 替换字符串中的 '/' 为 '+' + * @brief replaces'/'with'+'in the string * - * @param str 需要替换的字符串 - * @return 替换后的字符串 + * @param str The string that needs to be replaced + * @return The replaced string */ std::string ReplaceSlash(const std::string& str); int64_t ExecuteSyncRpc(RpcTask task); - // 心跳管理模块 + // Heartbeat management module std::shared_ptr heartbeatMgr_; - // 缓存模块 + // Cache module std::shared_ptr metaCache_; NebdClientOption option_; diff --git a/nebd/src/part1/nebd_common.h b/nebd/src/part1/nebd_common.h index 432f24534f..7c03839178 100644 --- a/nebd/src/part1/nebd_common.h +++ b/nebd/src/part1/nebd_common.h @@ -25,49 +25,49 @@ #include -// rpc request配置项 +// rpc request configuration item struct RequestOption { - // 同步rpc的最大重试次数 + // Maximum number of retries for synchronous rpc int64_t syncRpcMaxRetryTimes; - // rpc请求的重试间隔 + // The retry interval for rpc requests int64_t rpcRetryIntervalUs; - // rpc请求的最大重试间隔 + // Maximum retry interval for rpc requests int64_t rpcRetryMaxIntervalUs; - // rpc hostdown情况下的重试时间 + // The retry time in the case of rpc hostdown int64_t rpcHostDownRetryIntervalUs; - // brpc的健康检查周期时间 + // Health check cycle time for brpc int64_t rpcHealthCheckIntervalS; - // brpc从rpc失败到进行健康检查的最大时间间隔 + // The maximum time interval between RPC failure and health check in BRPC int64_t rpcMaxDelayHealthCheckIntervalMs; - // rpc发送执行队列个数 + // Number of RPC 
send execution queues uint32_t rpcSendExecQueueNum = 2; }; -// 日志配置项 +// Log Configuration Item struct LogOption { - // 日志存放目录 + // Log storage directory std::string logPath; }; -// nebd client配置项 +// nebd client configuration item struct NebdClientOption { // part2 socket file address std::string serverAddress; - // 文件锁路径 + // File lock path std::string fileLockPath; - // rpc request配置项 + // rpc request configuration item RequestOption requestOption; - // 日志配置项 + // Log Configuration Item LogOption logOption; }; -// heartbeat配置项 +// heartbeat configuration item struct HeartbeatOption { // part2 socket file address std::string serverAddress; - // heartbeat间隔 + // heartbeat interval int64_t intervalS; - // heartbeat rpc超时时间 + // heartbeat RPC timeout int64_t rpcTimeoutMs; }; diff --git a/nebd/src/part1/nebd_metacache.h b/nebd/src/part1/nebd_metacache.h index 3b596bdf62..27b7a02b61 100644 --- a/nebd/src/part1/nebd_metacache.h +++ b/nebd/src/part1/nebd_metacache.h @@ -51,7 +51,7 @@ struct NebdClientFileInfo { }; /** - * @brief: 保存当前已打开文件信息 + * @brief: Save the information of the currently opened file */ class NebdClientMetaCache { public: @@ -59,33 +59,33 @@ class NebdClientMetaCache { ~NebdClientMetaCache() = default; /** - * @brief: 添加文件信息 - * @param: fileInfo 文件信息 + * @brief: Add file information + * @param: fileInfo: file information */ void AddFileInfo(const NebdClientFileInfo& fileInfo); /** - * @brief: 删除文件信息 - * @param: fd 文件描述符 + * @brief: Delete file information + * @param: fd: file descriptor */ void RemoveFileInfo(int fd); /** - * @brief: 获取对应fd的文件信息 - * @param: fd 文件fd + * @brief: Obtain the file information of the corresponding fd + * @param: fd: file fd * @param[out]: fileInfo - * @return: 0 成功 / -1 返回 + * @return: 0 succeeded/-1 returned */ int GetFileInfo(int fd, NebdClientFileInfo* fileInfo) const; /** - * @brief: 获取当前已打开文件信息 - * @return: 当前已打开文件信息 + * @brief: Get information about currently opened files + * @return: Currently opened file information */ std::vector GetAllFileInfo() const; private: - // 当前已打开文件信息 + // Currently opened file information std::unordered_map fileinfos_; mutable nebd::common::RWLock rwLock_; }; diff --git a/nebd/src/part2/define.h b/nebd/src/part2/define.h index 4c2fc54022..28000e1311 100644 --- a/nebd/src/part2/define.h +++ b/nebd/src/part2/define.h @@ -42,7 +42,7 @@ using ::google::protobuf::RpcController; const char CURVE_PREFIX[] = "cbd"; const char TEST_PREFIX[] = "test"; -// nebd异步请求的类型 +// Types of nebd asynchronous requests enum class LIBAIO_OP { LIBAIO_OP_READ, LIBAIO_OP_WRITE, @@ -70,54 +70,54 @@ using RWLockPtr = std::shared_ptr; struct NebdServerAioContext; -// nebd回调函数的类型 +// The type of nebd callback function typedef void (*NebdAioCallBack)(struct NebdServerAioContext* context); -// nebd server端异步请求的上下文 -// 记录请求的类型、参数、返回信息、rpc信息 +// Context of Nebd server-side asynchronous requests +// Record the type, parameters, return information, and rpc information of the request struct NebdServerAioContext { - // 请求的offset + // Requested offset off_t offset = 0; - // 请求的size + // Requested size size_t size = 0; - // 记录异步返回的返回值 + // Record the return value returned asynchronously int ret = -1; - // 异步请求的类型,详见定义 + // The type of asynchronous request, as defined in the definition LIBAIO_OP op = LIBAIO_OP::LIBAIO_OP_UNKNOWN; - // 异步请求结束时调用的回调函数 + // Callback function called at the end of asynchronous request NebdAioCallBack cb; - // 请求的buf + // Buf requested void* buf = nullptr; - // rpc请求的相应内容 + // The corresponding content of the rpc request 
Message* response = nullptr; - // rpc请求的回调函数 + // Callback function for rpc requests Closure *done = nullptr; - // rpc请求的controller + // Controller for rpc requests RpcController* cntl = nullptr; // return rpc when io error bool returnRpcWhenIoError = false; }; struct NebdFileInfo { - // 文件大小 + // File size uint64_t size; - // object/chunk大小 + // object/chunk size uint64_t obj_size; - // object数量 + // Number of objects uint64_t num_objs; // block size uint32_t block_size; }; using ExtendAttribute = std::map; -// nebd server 端文件持久化的元数据信息 +// Metadata information for file persistence on the Nebd server side struct NebdFileMeta { int fd; std::string fileName; ExtendAttribute xattr; }; -// part2配置项 +// part2 configuration items const char LISTENADDRESS[] = "listen.address"; const char METAFILEPATH[] = "meta.file.path"; const char HEARTBEATTIMEOUTSEC[] = "heartbeat.timeout.sec"; diff --git a/nebd/src/part2/file_entity.cpp b/nebd/src/part2/file_entity.cpp index 0899472c72..da79c66ed0 100644 --- a/nebd/src/part2/file_entity.cpp +++ b/nebd/src/part2/file_entity.cpp @@ -165,11 +165,11 @@ int NebdFileEntity::Reopen(const ExtendAttribute& xattr) { int NebdFileEntity::Close(bool removeMeta) { CHECK(executor_ != nullptr) << "file entity is not inited. " << "filename: " << fileName_; - // 用于和其他用户请求互斥,避免文件被close后,请求发到后端导致返回失败 + // Used for mutual exclusion with other user requests, to prevent a request from reaching the backend after the file has been closed and therefore failing WriteLockGuard writeLock(rwLock_); - // 这里的互斥锁是为了跟open请求互斥,以下情况可能导致close和open并发 - // part2重启,导致文件被reopen,然后由于超时,文件准备被close - // 此时用户发送了挂载卷请求对文件进行open + // The mutex lock here is to prevent conflicts with open requests. The following scenario may lead to concurrent close and open operations: + // part2 restarts, causing the file to be reopened; then, due to a timeout, the file is about to be closed. + // At this point, a user sends a request to mount a volume, which opens the file. std::unique_lock lock(fileStatusMtx_); if (status_ == NebdFileStatus::OPENED) { int ret = executor_->Close(fileInstance_.get()); @@ -340,12 +340,12 @@ int NebdFileEntity::ProcessAsyncRequest(ProcessTask task, return -1; } - // 对于异步请求,将此closure传给aiocontext,从而在请求返回时释放读锁 + // For asynchronous requests, pass this closure to the aiocontext so that the read lock is released when the request returns done->SetClosure(aioctx->done); aioctx->done = doneGuard.release(); int ret = task(); if (ret < 0) { - // 如果请求失败,这里要主动释放锁,并将aiocontext还原回去 + // If the request fails, the lock must be actively released here and the aiocontext restored brpc::ClosureGuard doneGuard(done); aioctx->done = done->GetClosure(); done->SetClosure(nullptr); @@ -381,7 +381,7 @@ int NebdFileEntity::UpdateFileStatus(NebdFileInstancePtr fileInstance) { } bool NebdFileEntity::GuaranteeFileOpened() { - // 文件如果已经被用户close了,就不允许后面请求再自动打开进行操作了 + // If the file has already been closed by the user, subsequent requests are not allowed to automatically reopen it if (status_ == NebdFileStatus::DESTROYED) { LOG(ERROR) << "File has been destroyed. 
" << "filename: " << fileName_ diff --git a/nebd/src/part2/file_entity.h b/nebd/src/part2/file_entity.h index fb1e1448d8..2ac04c6c58 100644 --- a/nebd/src/part2/file_entity.h +++ b/nebd/src/part2/file_entity.h @@ -53,9 +53,9 @@ class NebdFileInstance; class NebdRequestExecutor; using NebdFileInstancePtr = std::shared_ptr; -// 处理用户请求时需要加读写锁,避免close时仍有用户IO未处理完成 -// 对于异步IO来说,只有返回时才能释放读锁,所以封装成Closure -// 在发送异步请求前,将closure赋值给NebdServerAioContext +// When processing user requests, it is necessary to add a read write lock to avoid user IO still not being processed when closing +// For asynchronous IO, the read lock can only be released on return, so it is encapsulated as a Closure +// Assign the closure value to NebdServerAioContext before sending an asynchronous request class NebdRequestReadLockClosure : public Closure { public: explicit NebdRequestReadLockClosure(BthreadRWLock& rwLock) // NOLINT @@ -96,70 +96,70 @@ class NebdFileEntity : public std::enable_shared_from_this { virtual ~NebdFileEntity(); /** - * 初始化文件实体 - * @param option: 初始化参数 - * @return 成功返回0, 失败返回-1 + * Initialize File Entity + * @param option: Initialize parameters + * @return returns 0 for success, -1 for failure */ virtual int Init(const NebdFileEntityOption& option); /** - * 打开文件 - * @return 成功返回fd,失败返回-1 + * Open File + * @return successfully returns fd, failure returns -1 */ virtual int Open(const OpenFlags* openflags); /** - * 重新open文件,如果之前的后端存储的连接还存在则复用之前的连接 - * 否则与后端存储建立新的连接 - * @param xattr: 文件reopen需要的信息 - * @return 成功返回fd,失败返回-1 + * Reopen the file and reuse the previous backend storage connection if it still exists + * Otherwise, establish a new connection with the backend storage + * @param xattr: Information required for file reopening + * @return successfully returns fd, failure returns -1 */ virtual int Reopen(const ExtendAttribute& xattr); /** - * 关闭文件 - * @param removeMeta: 是否要移除文件元数据记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + *Close File + * @param removeMeta: Do you want to remove the file metadata record? 
True means remove, false means not remove + * If it is a close request passed from part1, this parameter is true + * If it is a close request initiated by the heartbeat manager, this parameter is false + * @return returns 0 for success, -1 for failure */ virtual int Close(bool removeMeta); /** - * 给文件扩容 - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Expand file capacity + * @param newsize: New file size + * @return returns 0 for success, -1 for failure */ virtual int Extend(int64_t newsize); /** - * 获取文件信息 - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fileInfo[out]: File information + * @return returns 0 for success, -1 for failure */ virtual int GetInfo(NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified area space + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Discard(NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the content of the specified area + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioRead(NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request, writing data to a specified area + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioWrite(NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous requests, flush file caching + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Flush(NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @return 成功返回0,失败返回-1 + * Invalidate the specified file cache + * @return returns 0 for success, -1 for failure */ virtual int InvalidCache(); @@ -185,45 +185,45 @@ class NebdFileEntity : public std::enable_shared_from_this { private: /** - * 更新文件状态,包括元信息文件和内存状态 - * @param fileInstancea: open或reopen返回的文件上下文信息 - * @return: 成功返回0,失败返回-1 + * Update file status, including meta information files and memory status + * @param fileInstancea: The file context information returned by open or reopen + * @return: Success returns 0, failure returns -1 */ int UpdateFileStatus(NebdFileInstancePtr fileInstance); /** - * 请求统一处理函数 - * @param task: 实际请求执行的函数体 - * @return: 成功返回0,失败返回-1 + * Request Unified Processing Function + * @param task: The actual request to execute the function body + * @return: Success returns 0, failure returns -1 */ using ProcessTask = std::function; int ProcessSyncRequest(ProcessTask task); int ProcessAsyncRequest(ProcessTask task, NebdServerAioContext* aioctx); - // 确保文件处于opened状态,如果不是则尝试进行open - // 无法open或者open失败,则返回false, - // 如果文件处于open状态,则返回true + // Ensure that the file is in an open state, and if not, attempt to open it + // Unable to open or failed to open, returns false, + // If the file is in the open state, return true bool GuaranteeFileOpened(); private: - // 文件读写锁,处理请求前加读锁,close文件的时候加写锁 - // 避免close时还有请求未处理完 + // File read/write lock, apply read lock before processing requests, and apply write lock when closing files + // Avoiding pending requests during close BthreadRWLock rwLock_; - // 互斥锁,用于open、close之间的互斥 + // Mutex lock, used for mutual exclusion between open and close bthread::Mutex 
fileStatusMtx_; - // nebd server为该文件分配的唯一标识符 + // The unique identifier assigned by the nebd server to this file int fd_; - // 文件名称 + // File Name std::string fileName_; std::unique_ptr openFlags_; - // 文件当前状态,opened表示文件已打开,closed表示文件已关闭 + // The current state of the file, where 'opened' indicates that the file is open and 'closed' indicates that the file is closed std::atomic status_; - // 该文件上一次收到心跳时的时间戳 + // The timestamp of the last time the file received a heartbeat std::atomic timeStamp_; - // 文件在executor open时返回上下文信息,用于后续文件的请求处理 + // When the file is opened by the executor, contextual information is returned for subsequent file request processing NebdFileInstancePtr fileInstance_; - // 文件对应的executor的指针 + // Pointer to the executor corresponding to the file NebdRequestExecutor* executor_; - // 元数据持久化管理 + // Metadata Persistence Management MetaFileManagerPtr metaFileManager_; }; using NebdFileEntityPtr = std::shared_ptr; diff --git a/nebd/src/part2/file_manager.cpp b/nebd/src/part2/file_manager.cpp index 5c1dc2a15c..3223ba783d 100644 --- a/nebd/src/part2/file_manager.cpp +++ b/nebd/src/part2/file_manager.cpp @@ -62,14 +62,14 @@ int NebdFileManager::Fini() { } int NebdFileManager::Load() { - // 从元数据文件中读取持久化的文件信息 + // Reading persistent file information from metadata files std::vector fileMetas; int ret = metaFileManager_->ListFileMeta(&fileMetas); if (ret < 0) { LOG(ERROR) << "Load file metas failed."; return ret; } - // 根据持久化的信息重新open文件 + // Reopen files based on persistent information int maxFd = 0; for (auto& fileMeta : fileMetas) { NebdFileEntityPtr entity = @@ -221,7 +221,7 @@ NebdFileEntityPtr NebdFileManager::GenerateFileEntity( } } - // 检测是否存在冲突的文件记录 + // Detect for conflicting file records auto iter = fileMap_.find(fd); if (iter != fileMap_.end()) { LOG(ERROR) << "File entity conflict. " diff --git a/nebd/src/part2/file_manager.h b/nebd/src/part2/file_manager.h index bac54fd1fa..5caa9336ea 100644 --- a/nebd/src/part2/file_manager.h +++ b/nebd/src/part2/file_manager.h @@ -54,119 +54,119 @@ class NebdFileManager { explicit NebdFileManager(MetaFileManagerPtr metaFileManager); virtual ~NebdFileManager(); /** - * 停止FileManager并释放FileManager资源 - * @return 成功返回0,失败返回-1 + * Stop FileManager and release FileManager resources + * @return returns 0 for success, -1 for failure */ virtual int Fini(); /** - * 启动FileManager - * @return 成功返回0,失败返回-1 + * Start FileManager + * @return returns 0 for success, -1 for failure */ virtual int Run(); /** - * 打开文件 - * @param filename: 文件的filename - * @return 成功返回fd,失败返回-1 + * Open File + * @param filename: The filename of the file + * @return successfully returns fd, failure returns -1 */ virtual int Open(const std::string& filename, const OpenFlags* flags); /** - * 关闭文件 - * @param fd: 文件的fd - * @param removeRecord: 是否要移除文件记录,true表示移除,false表示不移除 - * 如果是part1传过来的close请求,此参数为true - * 如果是heartbeat manager发起的close请求,此参数为false - * @return 成功返回0,失败返回-1 + * Close File + * @param fd: fd of the file + * @param removeRecord: Do you want to remove the file record? 
True means remove, false means not remove + * If it is a close request passed from part1, this parameter is true + * If it is a close request initiated by the heartbeat manager, this parameter is false + * @return returns 0 for success, -1 for failure */ virtual int Close(int fd, bool removeRecord); /** - * 给文件扩容 - * @param fd: 文件的fd - * @param newsize: 新的文件大小 - * @return 成功返回0,失败返回-1 + * Expand file capacity + * @param fd: fd of the file + * @param newsize: New file size + * @return returns 0 for success, -1 for failure */ virtual int Extend(int fd, int64_t newsize); /** - * 获取文件信息 - * @param fd: 文件的fd - * @param fileInfo[out]: 文件信息 - * @return 成功返回0,失败返回-1 + * Obtain file information + * @param fd: fd of the file + * @param fileInfo[out]: File information + * @return returns 0 for success, -1 for failure */ virtual int GetInfo(int fd, NebdFileInfo* fileInfo); /** - * 异步请求,回收指定区域空间 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to reclaim the specified area space + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Discard(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,读取指定区域内容 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request to read the content of the specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioRead(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,写数据到指定区域 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous request, writing data to a specified area + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int AioWrite(int fd, NebdServerAioContext* aioctx); /** - * 异步请求,flush文件缓存 - * @param fd: 文件的fd - * @param aioctx: 异步请求上下文 - * @return 成功返回0,失败返回-1 + * Asynchronous requests, flush file caching + * @param fd: fd of the file + * @param aioctx: Asynchronous request context + * @return returns 0 for success, -1 for failure */ virtual int Flush(int fd, NebdServerAioContext* aioctx); /** - * 使指定文件缓存失效 - * @param fd: 文件的fd - * @return 成功返回0,失败返回-1 + * Invalidate the specified file cache + * @param fd: fd of the file + * @return returns 0 for success, -1 for failure */ virtual int InvalidCache(int fd); - // 根据fd从map中获取指定的entity - // 如果entity已存在,返回entity指针,否则返回nullptr + // Obtain the specified entity from the map based on fd + // If entity already exists, return entity pointer; otherwise, return nullptr virtual NebdFileEntityPtr GetFileEntity(int fd); virtual FileEntityMap GetFileEntityMap(); - // 将所有文件状态输出到字符串 + // Output all file states to a string std::string DumpAllFileStatus(); // set public for test - // 启动时从metafile加载文件记录,并reopen文件 + // Load file records from metafile at startup and reopen the file int Load(); private: - // 分配新的可用的fd,fd不允许和已经存在的重复 - // 成功返回的可用fd,失败返回-1 + // Assign new available fds, fds are not allowed to duplicate existing ones + // Successfully returned available fd, failed returned -1 int GenerateValidFd(); - // 根据文件名获取file entity - // 如果entity存在,直接返回entity指针 - // 如果entity不存在,则创建新的entity,并插入map,然后返回 + // Obtain file entity based on file name + // If entity exists, directly return the entity pointer + // If the entity does not exist, create a new entity, insert a map, and then return NebdFileEntityPtr GetOrCreateFileEntity(const std::string& 
fileName); - // 根据fd和文件名生成file entity, - // 如果fd对于的entity已存在,直接返回entity指针 - // 如果entity不存在,则生成新的entity,并插入map,然后返回 + // Generate file entity based on fd and file name, + // If fd already exists for entity, directly return the entity pointer + // If the entity does not exist, generate a new entity, insert a map, and then return NebdFileEntityPtr GenerateFileEntity(int fd, const std::string& fileName); - // 删除指定fd对应的entity + //Delete the entity corresponding to the specified fd void RemoveEntity(int fd); private: - // 当前filemanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of the filemanager, where true indicates running and false indicates not running std::atomic isRunning_; - // 文件名锁,对同名文件加锁 + // File name lock, lock files with the same name NameLock nameLock_; - // fd分配器 + // Fd distributor FdAllocator fdAlloc_; - // nebd server 文件记录管理 + // nebd server file record management MetaFileManagerPtr metaFileManager_; - // file map 读写保护锁 + // file map read write protection lock RWLock rwLock_; - // 文件fd和文件实体的映射 + // Mapping of file fd and file entities FileEntityMap fileMap_; }; using NebdFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/heartbeat_manager.cpp b/nebd/src/part2/heartbeat_manager.cpp index 4516874807..8ba76a936a 100644 --- a/nebd/src/part2/heartbeat_manager.cpp +++ b/nebd/src/part2/heartbeat_manager.cpp @@ -107,7 +107,7 @@ void HeartbeatManager::CheckTimeoutFunc() { bool HeartbeatManager::CheckNeedClosed(NebdFileEntityPtr entity) { uint64_t curTime = TimeUtility::GetTimeofDayMs(); uint64_t interval = curTime - entity->GetFileTimeStamp(); - // 文件如果是opened状态,并且已经超时,则需要调用close + // If the file is in an open state and has timed out, you need to call close bool needClose = entity->GetFileStatus() == NebdFileStatus::OPENED && interval > (uint64_t)1000 * heartbeatTimeoutS_; return needClose; diff --git a/nebd/src/part2/heartbeat_manager.h b/nebd/src/part2/heartbeat_manager.h index 73943bc4bc..1281e27576 100644 --- a/nebd/src/part2/heartbeat_manager.h +++ b/nebd/src/part2/heartbeat_manager.h @@ -45,11 +45,11 @@ using nebd::common::WriteLockGuard; using nebd::common::ReadLockGuard; struct HeartbeatManagerOption { - // 文件心跳超时时间(单位:秒) + // File heartbeat timeout (in seconds) uint32_t heartbeatTimeoutS; - // 心跳超时检测线程的检测间隔(时长:毫秒) + // Heartbeat timeout detection thread detection interval (duration: milliseconds) uint32_t checkTimeoutIntervalMs; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager; }; @@ -65,15 +65,15 @@ struct NebdClientInfo { version.Set(kVersion, version2); version.Update(); } - // nebd client的进程号 + // Process number of nebd client int pid; - // nebd version的metric + // The metric of nebd version nebd::common::StringStatus version; - // 上次心跳的时间戳 + // Time stamp of last heartbeat uint64_t timeStamp; }; -// 负责文件心跳超时管理 +// Responsible for managing file heartbeat timeout class HeartbeatManager { public: explicit HeartbeatManager(HeartbeatManagerOption option) @@ -85,14 +85,14 @@ class HeartbeatManager { } virtual ~HeartbeatManager() {} - // 启动心跳检测线程 + // Start Heartbeat Detection Thread virtual int Run(); - // 停止心跳检测线程 + // Stop Heartbeat Detection Thread virtual int Fini(); - // part2收到心跳后,会通过该接口更新心跳中包含的文件在内存中记录的时间戳 - // 心跳检测线程会根据该时间戳判断是否需要关闭文件 + // After receiving the heartbeat, part2 will update the timestamp of the files included in the heartbeat recorded in memory through this interface + // The heartbeat detection thread will determine whether the file needs to be closed based on this timestamp virtual bool 
UpdateFileTimestamp(int fd, uint64_t timestamp); - // part2收到心跳后,会通过该接口更新part1的时间戳 + // After receiving the heartbeat, part2 will update the timestamp of part1 through this interface virtual void UpdateNebdClientInfo(int pid, const std::string& version, uint64_t timestamp); std::map> GetNebdClients() { @@ -101,31 +101,31 @@ class HeartbeatManager { } private: - // 心跳检测线程的函数执行体 + // Function execution body of heartbeat detection thread void CheckTimeoutFunc(); - // 判断文件是否需要close + // Determine if the file needs to be closed bool CheckNeedClosed(NebdFileEntityPtr entity); - // 从内存中删除已经超时的nebdClientInfo + // Delete nebdClientInfo that has timed out from memory void RemoveTimeoutNebdClient(); private: - // 当前heartbeatmanager的运行状态,true表示正在运行,false标为未运行 + // The current running status of heartbeatmanager, where true indicates running and false indicates not running std::atomic isRunning_; - // 文件心跳超时时长 + // File heartbeat timeout duration uint32_t heartbeatTimeoutS_; - // 心跳超时检测线程的检测时间间隔 + // Heartbeat timeout detection thread detection time interval uint32_t checkTimeoutIntervalMs_; - // 心跳检测线程 + // Heartbeat detection thread std::thread checkTimeoutThread_; - // 心跳检测线程的sleeper + // sleeper for Heartbeat Detection Thread InterruptibleSleeper sleeper_; - // filemanager 对象指针 + // filemanager object pointer NebdFileManagerPtr fileManager_; - // nebd client的信息 + // Information on nebd client std::map> nebdClients_; - // nebdClient的计数器 + // Counters for nebdClient bvar::Adder nebdClientNum_; - // file map 读写保护锁 + // file map read write protection lock RWLock rwLock_; }; diff --git a/nebd/src/part2/main.cpp b/nebd/src/part2/main.cpp index f8c742fe9a..8d7792961c 100644 --- a/nebd/src/part2/main.cpp +++ b/nebd/src/part2/main.cpp @@ -28,12 +28,12 @@ DEFINE_string(confPath, "/etc/nebd/nebd-server.conf", "nebd server conf path"); int main(int argc, char* argv[]) { - // 解析参数 + // Parsing parameters google::ParseCommandLineFlags(&argc, &argv, false); google::InitGoogleLogging(argv[0]); std::string confPath = FLAGS_confPath.c_str(); - // 启动nebd server + // Start nebd server auto server = std::make_shared<::nebd::server::NebdServer>(); int initRes = server->Init(confPath); if (initRes < 0) { @@ -42,7 +42,7 @@ int main(int argc, char* argv[]) { } server->RunUntilAskedToQuit(); - // 停止nebd server + // Stop nebd server server->Fini(); google::ShutdownGoogleLogging(); diff --git a/nebd/src/part2/metafile_manager.cpp b/nebd/src/part2/metafile_manager.cpp index 23ce070f6e..b2338542a4 100644 --- a/nebd/src/part2/metafile_manager.cpp +++ b/nebd/src/part2/metafile_manager.cpp @@ -54,7 +54,7 @@ int NebdMetaFileManager::UpdateFileMeta(const std::string& fileName, WriteLockGuard writeLock(rwLock_); bool needUpdate = metaCache_.find(fileName) == metaCache_.end() || fileMeta != metaCache_[fileName]; - // 如果元数据信息没发生变更,则不需要写文件 + // If the metadata information has not changed, there is no need to write a file if (!needUpdate) { return 0; } @@ -105,15 +105,15 @@ int NebdMetaFileManager::UpdateMetaFile(const FileMetaMap& fileMetas) { } int NebdMetaFileManager::AtomicWriteFile(const Json::Value& root) { - // 写入tmp文件 + // Write tmp file std::string tmpFilePath = metaFilePath_ + ".tmp"; int fd = wrapper_->open(tmpFilePath.c_str(), O_CREAT|O_RDWR, 0644); - // open文件失败 + // Open file failed if (fd <= 0) { LOG(ERROR) << "Open tmp file " << tmpFilePath << " fail"; return -1; } - // 写入 + // Write std::string jsonString = root.toStyledString(); int writeSize = wrapper_->pwrite(fd, jsonString.c_str(), jsonString.size(), 0); @@ -123,7 
+123,7 @@ int NebdMetaFileManager::AtomicWriteFile(const Json::Value& root) { return -1; } - // 重命名 + // Rename int res = wrapper_->rename(tmpFilePath.c_str(), metaFilePath_.c_str()); if (res != 0) { LOG(ERROR) << "rename file " << tmpFilePath << " to " @@ -138,7 +138,7 @@ int NebdMetaFileManager::LoadFileMeta() { FileMetaMap tempMetas; std::ifstream in(metaFilePath_, std::ios::binary); if (!in) { - // 这里不应该返回错误,第一次初始化的时候文件可能还未创建 + // There should be no error returned here, the file may not have been created during the first initialization LOG(WARNING) << "File not exist: " << metaFilePath_; return 0; } @@ -180,7 +180,7 @@ int NebdMetaFileParser::Parse(Json::Value root, return -1; } fileMetas->clear(); - // 检验crc + // Check crc if (root[kCRC].isNull()) { LOG(ERROR) << "Parse json: " << root << " fail, no crc"; @@ -197,7 +197,7 @@ int NebdMetaFileParser::Parse(Json::Value root, return -1; } - // 没有volume字段 + // No volume field const auto& volumes = root[kVolumes]; if (volumes.isNull()) { LOG(WARNING) << "No volumes in json: " << root; @@ -224,7 +224,7 @@ int NebdMetaFileParser::Parse(Json::Value root, meta.fd = volume[kFd].asInt(); } - // 除了filename和fd的部分统一放到xattr里面 + // Except for the parts of filename and fd, they are uniformly placed in xattr Json::Value::Members mem = volume.getMemberNames(); ExtendAttribute xattr; for (auto iter = mem.begin(); iter != mem.end(); iter++) { @@ -253,7 +253,7 @@ Json::Value NebdMetaFileParser::ConvertFileMetasToJson( Json::Value root; root[kVolumes] = volumes; - // 计算crc + // Calculate crc std::string jsonString = root.toStyledString(); uint32_t crc = nebd::common::CRC32(jsonString.c_str(), jsonString.size()); root[kCRC] = crc; diff --git a/nebd/src/part2/metafile_manager.h b/nebd/src/part2/metafile_manager.h index a46255a467..5c619dca2e 100644 --- a/nebd/src/part2/metafile_manager.h +++ b/nebd/src/part2/metafile_manager.h @@ -71,37 +71,37 @@ class NebdMetaFileManager { NebdMetaFileManager(); virtual ~NebdMetaFileManager(); - // 初始化,主要从文件读取元数据信息并加载到内存 + // Initialization, mainly reading metadata information from files and loading it into memory virtual int Init(const NebdMetaFileManagerOption& option); - // 列出文件记录 + // List file records virtual int ListFileMeta(std::vector* fileMetas); - // 更新文件元数据 + // Update file metadata virtual int UpdateFileMeta(const std::string& fileName, const NebdFileMeta& fileMeta); - // 删除文件元数据 + // Delete file metadata virtual int RemoveFileMeta(const std::string& fileName); private: - // 原子写文件 + // Atomic writing file int AtomicWriteFile(const Json::Value& root); - // 更新元数据文件并更新内存缓存 + // Update metadata files and update memory cache int UpdateMetaFile(const FileMetaMap& fileMetas); - // 初始化从持久化文件读取到内存 + // Initialize reading from persistent files to memory int LoadFileMeta(); private: - // 元数据文件路径 + // Meta Data File Path std::string metaFilePath_; - // 文件系统操作封装 + // File system operation encapsulation std::shared_ptr wrapper_; - // 用于解析Json格式的元数据 + // Metadata for parsing Json format std::shared_ptr parser_; - // MetaFileManager 线程安全读写锁 + // MetaFileManager thread safe read write lock RWLock rwLock_; - // meta文件内存缓存 + // Meta file memory cache FileMetaMap metaCache_; }; using MetaFileManagerPtr = std::shared_ptr; diff --git a/nebd/src/part2/nebd_server.cpp b/nebd/src/part2/nebd_server.cpp index 74e5e2329d..92c78ff426 100644 --- a/nebd/src/part2/nebd_server.cpp +++ b/nebd/src/part2/nebd_server.cpp @@ -75,7 +75,7 @@ int NebdServer::Init(const std::string &confPath, LOG(INFO) << "NebdServer init heartbeatManager ok"; LOG(INFO) 
<< "NebdServer init ok"; - // 暴露版本信息 + // Expose version information LOG(INFO) << "nebd version: " << nebd::common::NebdVersion(); nebd::common::ExposeNebdVersion(); return 0; @@ -238,7 +238,7 @@ bool NebdServer::StartServer() { // start brcp server brpc::ServerOptions option; option.idle_timeout_sec = -1; - // 获取文件锁 + // Obtain file lock common::FileLock fileLock(listenAddress_ + ".lock"); if (fileLock.AcquireFileLock() != 0) { LOG(ERROR) << "Address already in use"; diff --git a/nebd/src/part2/nebd_server.h b/nebd/src/part2/nebd_server.h index c4ee40f23e..0a06556580 100644 --- a/nebd/src/part2/nebd_server.h +++ b/nebd/src/part2/nebd_server.h @@ -52,62 +52,62 @@ class NebdServer { private: /** - * @brief 从配置文件加载配置项 - * @param[in] confPath 配置文件路径 - * @return false-加载配置文件失败 true-加载配置文件成功 + * @brief Load configuration items from the configuration file + * @param[in] confPath Configuration file path + * @return false-Failed to load configuration file, true-Successfully loaded configuration file */ bool LoadConfFromFile(const std::string &confPath); /** - * @brief 初始化NebdFileManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize NebdFileManager + * @return false-initialization failed, true-initialization successful */ bool InitFileManager(); /** - * @brief 初始化request_executor_curve - * @return false-初始化失败 true-初始化成功 + * @brief initialization request_executor_curve + * @return false-initialization failed, true-initialization successful */ bool InitCurveRequestExecutor(); /** - * @brief 初始化NebdMetaFileManager - * @return nullptr-初始化不成功 否则表示初始化成功 + * @brief Initialize NebdMetaFileManager + * @return nullptr - initialization failed; otherwise, it indicates successful initialization */ MetaFileManagerPtr InitMetaFileManager(); /** - * @brief 初始化HeartbeatManagerOption + * @brief Initialize HeartbeatManagerOption * @param[out] opt - * @return false-初始化失败 true-初始化成功 + * @return false-initialization failed, true-initialization successful */ bool InitHeartbeatManagerOption(HeartbeatManagerOption *opt); /** - * @brief 初始化HeartbeatManager - * @return false-初始化失败 true-初始化成功 + * @brief Initialize HeartbeatManager + * @return false-initialization failed, true-initialization successful */ bool InitHeartbeatManager(); /** - * @brief 启动brpc service - * @return false-启动service失败 true-启动service成功 + * @brief Start brpc service + * @return false-Failed to start service, true-Successfully started service */ bool StartServer(); private: - // 配置项 + // Configuration Item Configuration conf_; - // NebdServer监听地址 + // NebdServer Listening Address std::string listenAddress_; - // NebdServer是否处于running状态 + // Is NebdServer in running state bool isRunning_ = false; // brpc server brpc::Server server_; - // 用于接受和处理client端的各种请求 + // Used to accept and process various requests from the client side std::shared_ptr fileManager_; - // 负责文件心跳超时处理 + // Responsible for handling file heartbeat timeout std::shared_ptr heartbeatManager_; // curveclient std::shared_ptr curveClient_; diff --git a/nebd/src/part2/request_executor.h b/nebd/src/part2/request_executor.h index 0d69e3c9c8..f3a121aa6e 100644 --- a/nebd/src/part2/request_executor.h +++ b/nebd/src/part2/request_executor.h @@ -41,14 +41,14 @@ class CurveRequestExecutor; using OpenFlags = nebd::client::ProtoOpenFlags; -// 具体RequestExecutor中会用到的文件实例上下文信息 -// RequestExecutor需要用到的文件上下文信息都记录到FileInstance内 +// The file instance context information used in the specific RequestExecutor +// The file context information required for RequestExecutor is recorded in FileInstance class 
NebdFileInstance { public: NebdFileInstance() {} virtual ~NebdFileInstance() {} - // 需要持久化到文件的内容,以kv形式返回,例如curve open时返回的sessionid - // 文件reopen的时候也会用到该内容 + // The content that needs to be persisted to the file is returned in kv format, such as the sessionid returned when curve open + // This content will also be used when reopening files ExtendAttribute xattr; }; diff --git a/nebd/src/part2/request_executor_curve.h b/nebd/src/part2/request_executor_curve.h index 11606d1bb1..84dc996727 100644 --- a/nebd/src/part2/request_executor_curve.h +++ b/nebd/src/part2/request_executor_curve.h @@ -54,12 +54,12 @@ void CurveAioCallback(struct CurveAioContext* curveCtx); class FileNameParser { public: /** - * @brief 解析fileName - * 一般格式: + * @brief parsing fileName + * General format: * qemu "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_:/etc/curve/client.conf" //NOLINT * nbd "cbd:pool1//cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_" // NOLINT * @param[in] fileName - * @return 解析结果 + * @return Parsing Result * qemu "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "/etc/curve/client.conf" //NOLINT * nbd "/cinder/volume-6f30d296-07f7-452e-a983-513191f8cd95_cinder_", "" //NOLINT */ @@ -90,38 +90,38 @@ class CurveRequestExecutor : public NebdRequestExecutor { private: /** - * @brief 构造函数 + * @brief constructor */ CurveRequestExecutor() {} /** - * @brief 从NebdFileInstance中解析出curve_client需要的fd - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中文件的fd, 如果小于0,表示解析结果错误 + * @brief Parse the fd needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the fd of the file in curve_client. If less than 0, it indicates an error in the parsing result. */ int GetCurveFdFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 从NebdFileInstance中解析出curbe_client需要的filename - * @param[in] fd NebdFileInstance类型 - * @return 返回curve_client中的filename, 如果为空,表示解析出错 + * @brief Parse the filename needed by curve_client from NebdFileInstance. + * @param[in] fd NebdFileInstance type. + * @return Returns the filename in curve_client. If empty, it indicates an error in the parsing. 
*/ std::string GetFileNameFromNebdFileInstance(NebdFileInstance* fd); /** - * @brief 将NebdServerAioContext类型转换为CurveAioContext类型 - * @param[in] nebdCtx NebdServerAioContext类型 - * @param[out] curveCtx CurveAioContext类型 - * @return -1转换失败,0转换成功 + * @brief Convert NebdServerAioContext type to CurveAioContext type + * @param[in] nebdCtx NebdServerAioContext type + * @param[out] curveCtx CurveAioContext type + * @return -1 conversion failed, 0 conversion succeeded */ int FromNebdCtxToCurveCtx( NebdServerAioContext *nebdCtx, CurveAioContext *curveCtx); /** - * @brief 将LIBAIO_OP类型转换为curve_client中LIBCURVE_OP类型 - * @param[in] op LIBAIO_OP类型 - * @param[out] out LIBCURVE_OP类型 - * @return -1转换失败,0转换成功 + * @brief Convert LIBAIO_OP types to LIBCURVE_OP types in the curve_client + * @param[in] op LIBAIO_OP type + * @param[out] out LIBCURVE_OP type + * @return -1 conversion failed, 0 conversion succeeded */ int FromNebdOpToCurveOp(LIBAIO_OP op, LIBCURVE_OP *out); diff --git a/nebd/src/part2/util.h b/nebd/src/part2/util.h index f733a04577..ca8520d8cf 100644 --- a/nebd/src/part2/util.h +++ b/nebd/src/part2/util.h @@ -51,9 +51,9 @@ class FdAllocator { FdAllocator() : fd_(0) {} ~FdAllocator() {} - // fd的有效值范围为[1, INT_MAX] + // The valid range of values for fd is [1, INT_MAX] int GetNext(); - // 初始化fd的值 + // Initialize the value of fd void InitFd(int fd); private: diff --git a/nebd/test/common/configuration_test.cpp b/nebd/test/common/configuration_test.cpp index 4c9e7b7c21..0b60d14108 100644 --- a/nebd/test/common/configuration_test.cpp +++ b/nebd/test/common/configuration_test.cpp @@ -136,10 +136,10 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. 
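For illustration only (the FdAllocator implementation itself is not part of this patch), a minimal sketch that satisfies the contract documented in util.h above, where GetNext() hands out fds in [1, INT_MAX] and InitFd() seeds the counter, might look like the following; the mutex member is hypothetical:

// Sketch only; assumes <climits> and <mutex>, plus a hypothetical std::mutex mtx_ member.
int FdAllocator::GetNext() {
    std::lock_guard<std::mutex> guard(mtx_);
    if (fd_ >= INT_MAX || fd_ < 0) {
        fd_ = 0;  // wrap around so the next value handed out is 1
    }
    return ++fd_;  // valid fds stay within [1, INT_MAX]
}

void FdAllocator::InitFd(int fd) {
    std::lock_guard<std::mutex> guard(mtx_);
    fd_ = fd;  // seed the counter, e.g. with the largest fd loaded from the metafile
}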
ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, return empty ASSERT_EQ(configs["xxx"], ""); } @@ -148,18 +148,18 @@ TEST_F(ConfigurationTest, SaveConfig) { Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Customize configuration items and save them conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload Configuration Items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // Custom configuration items can be read, but the original configuration items are overwritten and cannot be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } diff --git a/nebd/test/common/test_name_lock.cpp b/nebd/test/common/test_name_lock.cpp index 1f79ec5800..da61d1ae67 100644 --- a/nebd/test/common/test_name_lock.cpp +++ b/nebd/test/common/test_name_lock.cpp @@ -32,29 +32,29 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // Same lock but different strs can lock without deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks with the same str can lock without deadlock lock2.Lock("str1"); - // 同锁同str TryLock失败 + // Same lock with str TryLock failed ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // Same lock different str TryLock successful ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // Different locks with str TryLock succeeded ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlock OK lock2.Unlock("str2"); } @@ -64,12 +64,12 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Successfully locked within the scope, unable to lock again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocking outside the scope, with the option to add locks again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); diff --git a/nebd/test/part1/heartbeat_manager_unittest.cpp b/nebd/test/part1/heartbeat_manager_unittest.cpp index 72de6802d4..830ec266da 100644 --- a/nebd/test/part1/heartbeat_manager_unittest.cpp +++ b/nebd/test/part1/heartbeat_manager_unittest.cpp @@ -77,13 +77,13 @@ TEST_F(HeartbeatManagerTest, InvokeTimesTest) { manager->Run(); - // metaCache中数据为空,不发送心跳消息 + // The data in metaCache is empty and no heartbeat message will be sent for (int i = 0; i < 10; ++i) { ASSERT_EQ(0, fakeHeartBeatService.GetInvokeTimes()); std::this_thread::sleep_for(std::chrono::seconds(1)); } - // 添加数据 + // Add data NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); @@ -91,7 +91,7 @@ TEST_F(HeartbeatManagerTest, InvokeTimesTest) { int times = fakeHeartBeatService.GetInvokeTimes(); ASSERT_TRUE(times >= 9 && times <= 11); - // 清空metaCache数据 + // Clear MetaCache data metaCache->RemoveFileInfo(1); std::this_thread::sleep_for(std::chrono::seconds(2)); @@ -109,7 +109,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { 
std::vector currentFileInfos; - // 添加一个文件 + // Add a file NebdClientFileInfo fileInfo(1, "/test1", FileLock("/test1.lock")); metaCache->AddFileInfo(fileInfo); HeartbeatFileInfo info; @@ -126,7 +126,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 添加第二个文件 + // Add a second file fileInfo = NebdClientFileInfo(2, "/test2", FileLock("/test2.lock")); metaCache->AddFileInfo(fileInfo); info.set_fd(2); @@ -147,7 +147,7 @@ TEST_F(HeartbeatManagerTest, RequestValidTest) { ASSERT_EQ(currentFileInfos[i].name(), latestFileInfos[i].name()); } - // 删除第一个文件 + // Delete the first file metaCache->RemoveFileInfo(1); currentFileInfos.erase(currentFileInfos.begin()); diff --git a/nebd/test/part1/nebd_client_unittest.cpp b/nebd/test/part1/nebd_client_unittest.cpp index 6822947653..2d2e1a30a6 100644 --- a/nebd/test/part1/nebd_client_unittest.cpp +++ b/nebd/test/part1/nebd_client_unittest.cpp @@ -179,7 +179,7 @@ TEST_F(NebdFileClientTest, AioRpcFailTest) { auto end = std::chrono::system_clock::now(); auto elpased = std::chrono::duration_cast(end - start).count(); // NOLINT - // 重试睡眠时间: 100ms + 200ms + ... + 900ms = 4500ms + // Retry sleep time: 100ms + 200ms + ... + 900ms = 4500ms ASSERT_TRUE(elpased >= 4000 && elpased <= 5000); } @@ -264,7 +264,7 @@ TEST_F(NebdFileClientTest, NoNebdServerTest) { auto elapsed = std::chrono::duration_cast( end - start).count(); - // rpc failed的清空下,睡眠100ms后继续重试,共重试10次 + // When the rpc fails, sleep 100ms and then retry; 10 retries in total ASSERT_TRUE(elapsed >= 900 && elapsed <= 1100); } ASSERT_EQ(-1, Extend4Nebd(1, kFileSize)); @@ -380,8 +380,8 @@ TEST_F(NebdFileClientTest, ReOpenTest) { int fd = Open4Nebd(kFileName, nullptr); ASSERT_GT(fd, 0); - // 文件已经被打开,并占用文件锁 - // 再次打开时,获取文件锁失败,直接返回 + // The file is already open and holds the file lock + // When it is opened again, acquiring the file lock fails and the call returns directly ASSERT_EQ(-1, Open4Nebd(kFileName, nullptr)); ASSERT_EQ(0, Close4Nebd(fd)); diff --git a/nebd/test/part2/file_manager_unittest.cpp b/nebd/test/part2/file_manager_unittest.cpp index 0d13a7b18c..d1293281a4 100644 --- a/nebd/test/part2/file_manager_unittest.cpp +++ b/nebd/test/part2/file_manager_unittest.cpp @@ -71,7 +71,7 @@ class FileManagerTest : public ::testing::Test { } using TestTask = std::function; - // 构造初始环境 + // Construct the initial environment void InitEnv() { NebdFileMeta meta; meta.fd = 1; @@ -125,7 +125,7 @@ class FileManagerTest : public ::testing::Test { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件状态为OPENED + // The file status is OPENED ExpectCallRequest(type, 0); ASSERT_EQ(0, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); @@ -134,7 +134,7 @@ .WillOnce(Return(0)); ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件状态为CLOSED + // The file status is CLOSED EXPECT_CALL(*executor_, Open(testFile1, _)) .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) @@ -147,7 +147,7 @@ void RequestFailTest(RequestType type, TestTask task) { InitEnv(); - // 将文件close + // Close the file NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); EXPECT_CALL(*executor_, Close(NotNull())) .WillOnce(Return(0));
ASSERT_EQ(entity1->Close(false), 0); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // open文件失败 + // Open file failed EXPECT_CALL(*executor_, Open(testFile1, _)) .WillOnce(Return(nullptr)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) @@ -163,7 +163,7 @@ class FileManagerTest : public ::testing::Test { ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 更新元数据文件失败 + // Failed to update metadata file EXPECT_CALL(*executor_, Open(testFile1, _)) .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) @@ -173,7 +173,7 @@ class FileManagerTest : public ::testing::Test { ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 执行处理函数失败 + // Failed to execute processing function EXPECT_CALL(*executor_, Open(testFile1, _)) .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile1, _)) @@ -182,7 +182,7 @@ class FileManagerTest : public ::testing::Test { ASSERT_EQ(-1, task(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 将文件状态置为DESTROYED + // Set the file status to DESTROYED EXPECT_CALL(*executor_, Close(NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) @@ -193,7 +193,7 @@ class FileManagerTest : public ::testing::Test { .Times(0); ASSERT_EQ(-1, task(1)); - // 直接将文件删除 + // Delete files directly ASSERT_EQ(0, fileManager_->Close(1, true)); ASSERT_EQ(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(-1, task(1)); @@ -223,10 +223,10 @@ TEST_F(FileManagerTest, RunTest) { EXPECT_CALL(*metaFileManager_, UpdateFileMeta(_, _)) .WillOnce(Return(0)); ASSERT_EQ(fileManager_->Run(), 0); - // 重复run返回失败 + // Repeated run returns failed ASSERT_EQ(fileManager_->Run(), -1); - // 校验结果 + // Verification results FileEntityMap entityMap = fileManager_->GetFileEntityMap(); ASSERT_EQ(1, entityMap.size()); ASSERT_NE(nullptr, entityMap[meta.fd]); @@ -239,12 +239,12 @@ TEST_F(FileManagerTest, RunFailTest) { std::vector fileMetas; fileMetas.emplace_back(meta); - // list file meta失败 + // List file meta failed EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) .WillOnce(Return(-1)); ASSERT_EQ(fileManager_->Run(), -1); - // reopen失败不影响Run成功 + // Reopen failure does not affect Run success EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); @@ -253,7 +253,7 @@ TEST_F(FileManagerTest, RunFailTest) { ASSERT_EQ(fileManager_->Run(), 0); ASSERT_EQ(fileManager_->Fini(), 0); - // 更新metafile失败不影响Run成功 + // Failure to update metafile does not affect the success of Run EXPECT_CALL(*metaFileManager_, ListFileMeta(_)) .WillOnce(DoAll(SetArgPointee<0>(fileMetas), Return(0))); @@ -268,7 +268,7 @@ TEST_F(FileManagerTest, RunFailTest) { TEST_F(FileManagerTest, OpenTest) { InitEnv(); - // open一个不存在的文件 + // Open a non-existent file EXPECT_CALL(*executor_, Open(testFile2, _)) .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) @@ -276,7 +276,7 @@ TEST_F(FileManagerTest, OpenTest) { int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); - // 重复open + // Repeat open fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, 2); @@ -292,7 +292,7 @@ TEST_F(FileManagerTest, OpenTest) { .WillOnce(Return(0)); ASSERT_EQ(entity2->Close(false), 0); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::CLOSED); - // open 已经close的文件, fd不变 + // Open closed files, keep fd unchanged EXPECT_CALL(*executor_, Open(testFile2, _)) 
.WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) @@ -304,7 +304,7 @@ TEST_F(FileManagerTest, OpenTest) { TEST_F(FileManagerTest, OpenFailTest) { InitEnv(); - // 调用后端open接口时出错 + // Error calling backend open interface EXPECT_CALL(*executor_, Open(testFile2, _)) .WillOnce(Return(nullptr)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) @@ -312,7 +312,7 @@ TEST_F(FileManagerTest, OpenFailTest) { int fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // 持久化元数据信息失败 + // Persisting metadata information failed EXPECT_CALL(*executor_, Open(testFile2, _)) .WillOnce(Return(mockInstance_)); EXPECT_CALL(*metaFileManager_, UpdateFileMeta(testFile2, _)) @@ -322,7 +322,7 @@ TEST_F(FileManagerTest, OpenFailTest) { fd = fileManager_->Open(testFile2, nullptr); ASSERT_EQ(fd, -1); - // Open一个非法的filename + // Open an illegal filename EXPECT_CALL(*executor_, Open(_, _)) .Times(0); fd = fileManager_->Open(unknownFile, nullptr); @@ -331,14 +331,14 @@ TEST_F(FileManagerTest, OpenFailTest) { TEST_F(FileManagerTest, CloseTest) { InitEnv(); - // 指定的fd不存在,直接返回成功 + // The specified fd does not exist, return success directly ASSERT_EQ(nullptr, fileManager_->GetFileEntity(2)); ASSERT_EQ(0, fileManager_->Close(2, true)); NebdFileEntityPtr entity1 = fileManager_->GetFileEntity(1); ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,且文件状态为OPENED,removeRecord为false + // The file exists and its status is OPENED, while removeRecord is false EXPECT_CALL(*executor_, Close(NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) @@ -346,7 +346,7 @@ TEST_F(FileManagerTest, CloseTest) { ASSERT_EQ(0, fileManager_->Close(1, false)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为false + // File exists, file status is CLOSED, removeRecord is false EXPECT_CALL(*executor_, Close(NotNull())) .Times(0); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) @@ -354,7 +354,7 @@ TEST_F(FileManagerTest, CloseTest) { ASSERT_EQ(0, fileManager_->Close(1, false)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::CLOSED); - // 文件存在,文件状态为CLOSED,removeRecord为true + // The file exists, the file status is CLOSED, and removeRecord is true EXPECT_CALL(*executor_, Close(NotNull())) .Times(0); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) @@ -372,7 +372,7 @@ TEST_F(FileManagerTest, CloseTest) { NebdFileEntityPtr entity2 = fileManager_->GetFileEntity(2); ASSERT_NE(entity2, nullptr); ASSERT_EQ(entity2->GetFileStatus(), NebdFileStatus::OPENED); - // 文件存在,文件状态为OPENED,removeRecord为true + // File exists, file status is OPENED, removeRecord is true EXPECT_CALL(*executor_, Close(NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile2)) @@ -387,7 +387,7 @@ TEST_F(FileManagerTest, CloseFailTest) { ASSERT_NE(nullptr, entity1); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // executor close 失败 + // Executor close failed EXPECT_CALL(*executor_, Close(NotNull())) .WillOnce(Return(-1)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) @@ -396,7 +396,7 @@ TEST_F(FileManagerTest, CloseFailTest) { ASSERT_NE(nullptr, fileManager_->GetFileEntity(1)); ASSERT_EQ(entity1->GetFileStatus(), NebdFileStatus::OPENED); - // remove file meta 失败 + // Remove file meta failed EXPECT_CALL(*executor_, Close(NotNull())) .WillOnce(Return(0)); EXPECT_CALL(*metaFileManager_, RemoveFileMeta(testFile1)) diff --git 
a/nebd/test/part2/heartbeat_manager_unittest.cpp b/nebd/test/part2/heartbeat_manager_unittest.cpp index 2ae0e8d221..84da74cb8b 100644 --- a/nebd/test/part2/heartbeat_manager_unittest.cpp +++ b/nebd/test/part2/heartbeat_manager_unittest.cpp @@ -59,10 +59,10 @@ class HeartbeatManagerTest : public ::testing::Test { TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { ASSERT_EQ(heartbeatManager_->Run(), 0); - // 已经在run了不允许重复Run或者Init + // It is already running, and duplicate Run or Init is not allowed ASSERT_EQ(heartbeatManager_->Run(), -1); - // 构造file entity + // Construct file entity uint64_t curTime = TimeUtility::GetTimeofDayMs(); std::shared_ptr entity1 = std::make_shared(); @@ -83,7 +83,7 @@ TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { EXPECT_CALL(*entity3, GetFileStatus()) .WillRepeatedly(Return(NebdFileStatus::OPENED)); - // 构造file map + // Construct a file map FileEntityMap entityMap; entityMap.emplace(1, entity1); entityMap.emplace(2, entity2); @@ -91,7 +91,7 @@ TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { EXPECT_CALL(*fileManager_, GetFileEntityMap()) .WillRepeatedly(Return(entityMap)); - // 预期结果 + // Expected results EXPECT_CALL(*entity1, Close(false)) .Times(AtLeast(1)); EXPECT_CALL(*entity2, Close(false)) @@ -101,7 +101,7 @@ TEST_F(HeartbeatManagerTest, CheckTimeoutTest) { ::sleep(2); ASSERT_EQ(heartbeatManager_->Fini(), 0); - // 重复Fini,也返回成功 + // Repeat Fini and return success ASSERT_EQ(heartbeatManager_->Fini(), 0); } diff --git a/nebd/test/part2/heartbeat_service_test.cpp b/nebd/test/part2/heartbeat_service_test.cpp index 7d60ce6981..d4a8283ad5 100644 --- a/nebd/test/part2/heartbeat_service_test.cpp +++ b/nebd/test/part2/heartbeat_service_test.cpp @@ -45,7 +45,7 @@ class HeartbeatServiceTest : public ::testing::Test { }; TEST_F(HeartbeatServiceTest, KeepAlive) { - // 启动server + // Start server brpc::Server server; NebdHeartbeatServiceImpl heartbeatService(heartbeatManager_); ASSERT_EQ(0, server.AddService(&heartbeatService, @@ -68,7 +68,7 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { nebd::client::NebdHeartbeatService_Stub stub(&channel); brpc::Controller cntl; - // 正常情况 + // Normal situation EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillRepeatedly(Return(true)); @@ -76,7 +76,7 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kOK, response.retcode()); - // 有文件更新时间戳失败 + // Failed to update timestamp with file EXPECT_CALL(*heartbeatManager_, UpdateFileTimestamp(_, _)) .Times(3) .WillOnce(Return(false)) @@ -86,7 +86,7 @@ TEST_F(HeartbeatServiceTest, KeepAlive) { ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(nebd::client::RetCode::kNoOK, response.retcode()); - // 停止server + // Stop server server.Stop(0); server.Join(); } diff --git a/nebd/test/part2/metafile_manager_test.cpp b/nebd/test/part2/metafile_manager_test.cpp index 7027cb9da6..110233b443 100644 --- a/nebd/test/part2/metafile_manager_test.cpp +++ b/nebd/test/part2/metafile_manager_test.cpp @@ -61,19 +61,19 @@ TEST_F(MetaFileManagerTest, nomaltest) { NebdMetaFileManager metaFileManager; ASSERT_EQ(metaFileManager.Init(option), 0); std::vector fileMetas; - // 文件不存在 + // File does not exist ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 添加两条记录,curve和test各一 + // Add two records, one for curve and one for test NebdFileMeta fileMeta1; fileMeta1.fileName = "test:volume1"; fileMeta1.fd = 1; ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 使用相同的内容Update + // Update using 
the same content ASSERT_EQ(0, metaFileManager.UpdateFileMeta(fileMeta1.fileName, fileMeta1)); - // 插入不同的meta + // Insert different meta NebdFileMeta fileMeta2; fileMeta2.fileName = "cbd:volume2"; fileMeta2.fd = 2; @@ -89,9 +89,9 @@ TEST_F(MetaFileManagerTest, nomaltest) { // remove meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta(fileMeta2.fileName)); - // remove 不存在的meta + // remove non-existent meta ASSERT_EQ(0, metaFileManager.RemoveFileMeta("unknown")); - // 校验结果 + // Verification results fileMetas.clear(); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); @@ -111,14 +111,14 @@ TEST_F(MetaFileManagerTest, UpdateMetaFailTest) { fileMetaMap.emplace(fileMeta.fileName, fileMeta); std::vector fileMetas; - // open临时文件失败 + // Open temporary file failed EXPECT_CALL(*wrapper_, open(_, _, _)) .WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.UpdateFileMeta(fileMeta.fileName, fileMeta)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // 写入临时文件失败 + // Failed to write temporary file EXPECT_CALL(*wrapper_, open(_, _, _)) .WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) @@ -129,7 +129,7 @@ TEST_F(MetaFileManagerTest, UpdateMetaFailTest) { ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(0, fileMetas.size()); - // rename失败 + // Rename failed NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); EXPECT_CALL(*wrapper_, open(_, _, _)) @@ -160,7 +160,7 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { NebdMetaFileParser parser; Json::Value root = parser.ConvertFileMetasToJson(fileMetaMap); - // 先插入一条数据 + // Insert a piece of data first EXPECT_CALL(*wrapper_, open(_, _, _)) .WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) @@ -176,14 +176,14 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { fileMetaMap.erase(fileMeta.fileName); root = parser.ConvertFileMetasToJson(fileMetaMap); - // open临时文件失败 + // Open temporary file failed EXPECT_CALL(*wrapper_, open(_, _, _)) .WillOnce(Return(-1)); ASSERT_EQ(-1, metaFileManager.RemoveFileMeta(fileMeta.fileName)); ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // 写入临时文件失败 + // Failed to write temporary file EXPECT_CALL(*wrapper_, open(_, _, _)) .WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) @@ -194,7 +194,7 @@ TEST_F(MetaFileManagerTest, RemoveMetaFailTest) { ASSERT_EQ(0, metaFileManager.ListFileMeta(&fileMetas)); ASSERT_EQ(1, fileMetas.size()); - // rename失败 + // Rename failed EXPECT_CALL(*wrapper_, open(_, _, _)) .WillOnce(Return(1)); EXPECT_CALL(*wrapper_, pwrite(_, _, _, _)) @@ -215,7 +215,7 @@ TEST(MetaFileParserTest, Parse) { Json::Value volumes; FileMetaMap fileMetas; - // 正常情况 + // Normal situation volume[kFileName] = "cbd:volume1"; volume[kFd] = 1; volumes.append(volume); @@ -225,18 +225,18 @@ TEST(MetaFileParserTest, Parse) { FillCrc(&root); ASSERT_EQ(0, parser.Parse(root, &fileMetas)); - // 空指针 + // Null pointer ASSERT_EQ(-1, parser.Parse(root, nullptr)); - // crc校验不正确 + // Incorrect crc verification root[kCRC] = root[kCRC].asUInt() + 1; ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有crc字段 + // No crc field root.removeMember(kCRC); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 没有volumes字段或volumes字段是null,不应该报错 + // There is no volumes field or the volumes field is null, and an error should not be reported root.clear(); root["key"] = "value"; FillCrc(&root); @@ -249,7 +249,7 @@ TEST(MetaFileParserTest, Parse) { 
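The FillCrc() helper used by these parser tests is not shown in this diff. Assuming it mirrors the crc logic visible in ConvertFileMetasToJson above (CRC32 over the styled JSON before the crc field is attached), a plausible sketch is:

// Sketch only; assumes FillCrc recomputes the crc the same way the metafile writer does.
void FillCrc(Json::Value* root) {
    root->removeMember(kCRC);  // the crc field itself is not part of the checksummed payload
    std::string jsonString = root->toStyledString();  // same serialization used when writing the metafile
    (*root)[kCRC] = nebd::common::CRC32(jsonString.c_str(), jsonString.size());
}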
ASSERT_EQ(0, parser.Parse(root, &fileMetas)); ASSERT_TRUE(fileMetas.empty()); - // 记录中没有filename + // There is no filename in the record volume.clear(); volumes.clear(); root.clear(); @@ -259,7 +259,7 @@ TEST(MetaFileParserTest, Parse) { FillCrc(&root); ASSERT_EQ(-1, parser.Parse(root, &fileMetas)); - // 记录中没有fd + // The record does not contain an 'fd'. volume.clear(); volumes.clear(); root.clear(); diff --git a/nebd/test/part2/test_nebd_server.cpp b/nebd/test/part2/test_nebd_server.cpp index 1f6f8ef112..b3baffdf43 100644 --- a/nebd/test/part2/test_nebd_server.cpp +++ b/nebd/test/part2/test_nebd_server.cpp @@ -37,11 +37,11 @@ TEST(TestNebdServer, test_Init_Run_Fini) { auto curveClient = std::make_shared(); std::string confPath; - // 1. 配置文件不存在, init失败 + //1. Configuration file does not exist, init failed confPath = "./nebd.conf"; ASSERT_EQ(-1, server.Init(confPath)); - // 2. 配置文件存在, 监听端口未设置 + //2. Configuration file exists, listening port not set confPath = "./nebd/test/part2/nebd-server-err.conf"; Configuration conf; conf.SetBoolValue("response.returnRpcWhenIoError", false); @@ -49,55 +49,55 @@ TEST(TestNebdServer, test_Init_Run_Fini) { conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 3、配置文件中没有client配置 + //3. There is no client configuration in the configuration file conf.SetStringValue("listen.address", "/tmp/nebd-server.sock"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath)); - // 4. curveclient init失败 + //4. Curveclient init failed conf.SetStringValue("curveclient.confPath", "/etc/curve/client.conf"); conf.SaveConfig(); EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(-1)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 5、初始化fileManager失败 + //5. Failed to initialize fileManager EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 6、没有heartbeat.timeout字段 + //6. There is no heartbeat.timeout field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetStringValue("meta.file.path", "./nebd-server-test.meta"); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7、没有heartbeat.check.interval.ms字段 + //7. No heartbeat.check.interval.ms field EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.timeout.sec", 30); conf.SaveConfig(); ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 8. 初始化成功 + //8. Initialized successfully EXPECT_CALL(*curveClient, Init(_)).WillOnce(Return(0)); conf.SetIntValue("heartbeat.check.interval.ms", 3000); conf.SaveConfig(); ASSERT_EQ(0, server.Init(confPath, curveClient)); - // 9. run成功 + //9. Run successful EXPECT_CALL(*curveClient, UnInit()).Times(2); std::thread nebdServerThread(&NebdServer::RunUntilAskedToQuit, &server); sleep(1); - // 10、再次Run会失败 + //10. Running again will fail ASSERT_EQ(-1, server.RunUntilAskedToQuit()); - // 11、Run之后Init会失败 + //11. Init will fail after Run ASSERT_EQ(-1, server.Init(confPath, curveClient)); - // 7. stop成功 + //7. Stop successful ASSERT_EQ(0, server.Fini()); - // 8. 再次stop不会重复释放资源 + //8. 
Stopping again will not repeatedly release resources ASSERT_EQ(0, server.Fini()); nebdServerThread.join(); } diff --git a/nebd/test/part2/test_request_executor_curve.cpp b/nebd/test/part2/test_request_executor_curve.cpp index 2b749d0615..cc296236a5 100644 --- a/nebd/test/part2/test_request_executor_curve.cpp +++ b/nebd/test/part2/test_request_executor_curve.cpp @@ -77,7 +77,7 @@ TEST_F(TestReuqestExecutorCurve, test_Open) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1. 传入的fileName解析失败 + // 1. Failed to parse the passed in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(fileName, _)).Times(0); @@ -86,7 +86,7 @@ TEST_F(TestReuqestExecutorCurve, test_Open) { ASSERT_TRUE(nullptr == ret); } - // 2. curveclient open失败 + // 2. Curveclient open failed { EXPECT_CALL(*curveClient_, Open(curveFileName, _)) .WillOnce(Return(-1)); @@ -95,7 +95,7 @@ TEST_F(TestReuqestExecutorCurve, test_Open) { ASSERT_TRUE(nullptr == ret); } - // 3. open成功 + // 3. Open successful { EXPECT_CALL(*curveClient_, Open(curveFileName, _)) .WillOnce(Return(1)); @@ -117,7 +117,7 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { std::string fileName("cbd:pool1//cinder/volume-1234_cinder_:/client.conf"); std::string curveFileName("/cinder/volume-1234_cinder_"); - // 1. 传入的fileName解析失败 + // 1. Failed to parse the passed in fileName { std::string errFileName("cbd:pool1/:"); EXPECT_CALL(*curveClient_, Open(_, _)).Times(0); @@ -126,7 +126,7 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { ASSERT_TRUE(nullptr == ret); } - // 2. repoen失败 + // 2. repoen failed { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(-1)); @@ -135,7 +135,7 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { ASSERT_TRUE(nullptr == ret); } - // 3. reopen成功 + // 3. reopen successful { EXPECT_CALL(*curveClient_, ReOpen(curveFileName, _)) .WillOnce(Return(1)); @@ -153,14 +153,14 @@ TEST_F(TestReuqestExecutorCurve, test_ReOpen) { TEST_F(TestReuqestExecutorCurve, test_Close) { auto executor = CurveRequestExecutor::GetInstance(); - // 1. nebdFileIns不是CurveFileInstance类型, close失败 + // 1. nebdFileIns is not of type CurveFileInstance, close failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Close(_)).Times(0); ASSERT_EQ(-1, executor.Close(nebdFileIns)); } - // 2. nebdFileIns中的fd<0, close失败 + // 2. fd<0 in nebdFileIns, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -168,7 +168,7 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 3. 调用curveclient的close接口失败, close失败 + // 3. Calling the close interface of curveclient failed, close failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -177,7 +177,7 @@ TEST_F(TestReuqestExecutorCurve, test_Close) { ASSERT_EQ(-1, executor.Close(curveFileIns)); } - // 4. close成功 + // 4. close successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -191,21 +191,21 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, extend失败 + // 1. nebdFileIns is not of type CurveFileInstance, extend failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(nebdFileIns, 1)); } - // 2. 
nebdFileIns中的fileName为空, extend失败 + // 2. FileName in nebdFileIns is empty, extend failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, Extend(_, _)).Times(0); ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 3. 调用curveclient的extend接口失败, extend失败 + // 3. Calling the extend interface of curveclient failed, extend failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -214,7 +214,7 @@ TEST_F(TestReuqestExecutorCurve, test_Extend) { ASSERT_EQ(-1, executor.Extend(curveFileIns, 1)); } - // 4. extend成功 + // 4. extend successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -229,14 +229,14 @@ TEST_F(TestReuqestExecutorCurve, test_GetInfo) { NebdFileInfo fileInfo; int curveFd = 123; - // 1. nebdFileIns不是CurveFileInstance类型, stat失败 + // 1. nebdFileIns is not of type CurveFileInstance, stat failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); ASSERT_EQ(-1, executor.GetInfo(nebdFileIns, &fileInfo)); } - // 2. nebdFileIns中的fd为空, stat失败 + // 2. Fd in nebdFileIns is empty, stat failed { auto curveFileIns = new CurveFileInstance(); EXPECT_CALL(*curveClient_, StatFile(curveFd, _)).Times(0); @@ -244,7 +244,7 @@ TEST_F(TestReuqestExecutorCurve, test_GetInfo) { } - // 3. 调用curveclient的stat接口失败, stat失败 + // 3. Calling the stat interface of curveclient failed, stat failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = curveFd; @@ -253,7 +253,7 @@ TEST_F(TestReuqestExecutorCurve, test_GetInfo) { ASSERT_EQ(-1, executor.GetInfo(curveFileIns, &fileInfo)); } - // 4. stat成功 + // 4. stat successful { const uint64_t size = 10ull * 1024 * 1024 * 1024; const uint32_t blocksize = 4096; @@ -278,14 +278,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步读失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous read failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioRead(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioRead(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步读失败 + // 2. fd<0 in nebdFileIns, asynchronous read failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -293,7 +293,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioRead接口失败, 异步读失败 + // 3. Calling the AioRead interface of curveclient failed, asynchronous read failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -307,7 +307,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioRead) { ASSERT_EQ(-1, executor.AioRead(curveFileIns, &aiotcx)); } - // 4. 异步读取成功 + // 4. Asynchronous read successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -327,14 +327,14 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { aiotcx.cb = NebdUnitTestCallback; std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 异步写失败 + // 1. nebdFileIns is not of type CurveFileInstance, asynchronous write failed { auto nebdFileIns = new NebdFileInstance(); EXPECT_CALL(*curveClient_, AioWrite(_, _, _)).Times(0); ASSERT_EQ(-1, executor.AioWrite(nebdFileIns, &aiotcx)); } - // 2. nebdFileIns中的fd<0, 异步写失败 + // 2. 
fd<0 in nebdFileIns, asynchronous write failed { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = -1; @@ -342,7 +342,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 3. 调用curveclient的AioWrite接口失败, 异步写失败 + // 3. Calling the AioWrite interface of curveclient failed, asynchronous write failed { auto curveFileIns = new CurveFileInstance(); aiotcx.size = 1; @@ -356,7 +356,7 @@ TEST_F(TestReuqestExecutorCurve, test_AioWrite) { ASSERT_EQ(-1, executor.AioWrite(curveFileIns, &aiotcx)); } - // 4. 异步写入成功 + // 4. Asynchronous write successful { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; @@ -448,13 +448,13 @@ TEST_F(TestReuqestExecutorCurve, test_InvalidCache) { auto executor = CurveRequestExecutor::GetInstance(); std::string curveFilename("/cinder/volume-1234_cinder_"); - // 1. nebdFileIns不是CurveFileInstance类型, 不合法 + // 1. nebdFileIns is not of type CurveFileInstance, illegal { auto nebdFileIns = new NebdFileInstance(); ASSERT_EQ(-1, executor.InvalidCache(nebdFileIns)); } - // 2. fd<0, 不合法 + // 2. fd<0, illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fileName = curveFilename; @@ -462,14 +462,14 @@ TEST_F(TestReuqestExecutorCurve, test_InvalidCache) { ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 3. filename为空,不合法 + // 3. The filename is empty and illegal { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; ASSERT_EQ(-1, executor.InvalidCache(curveFileIns)); } - // 4. 合法 + // 4. legitimate { auto curveFileIns = new CurveFileInstance(); curveFileIns->fd = 1; diff --git a/proto/chunk.proto b/proto/chunk.proto index af5cd3fb5a..c19303c854 100755 --- a/proto/chunk.proto +++ b/proto/chunk.proto @@ -20,7 +20,7 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/chunk"; -// Qos 参数 +// Qos parameters message QosRequestParas { optional uint32 clientId = 1; optional int32 dmclockDelta = 2; @@ -28,38 +28,38 @@ message QosRequestParas { } message QosResponseParas { - optional int32 phase = 1; // 0: 代表 reservation 阶段; 1: 代表 priority 阶段 + optional int32 phase = 1; // 0: represents the reservation stage; 1: Representing the priority stage optional int32 cost = 2; // } // For chunk enum CHUNK_OP_TYPE { - CHUNK_OP_DELETE = 0; // 删除 chunk - CHUNK_OP_READ = 1; // 读 chunk - CHUNK_OP_WRITE = 2; // 写 chunk + CHUNK_OP_DELETE = 0; // Delete chunk + CHUNK_OP_READ = 1; // Read chunk + CHUNK_OP_WRITE = 2; // Write chunk CHUNK_OP_READ_SNAP = 3; // read chunk snapshot - // TODO(wudemiao): 后期替换成CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN, - // 保证和chunkserver的接口一致 + // TODO(wudemiao): later replaced with CHUNK_OP_DELETE_SNAP_OR_CORRECT_SN, + // Ensure consistency with chunkserver interface CHUNK_OP_DELETE_SNAP = 4; // delete chunk snapshot - CHUNK_OP_CREATE_CLONE = 5; // 创建clone chunk - CHUNK_OP_RECOVER = 6; // 恢复clone chunk - CHUNK_OP_PASTE = 7; // paste chunk 内部请求 + CHUNK_OP_CREATE_CLONE = 5; // Create clone chunk + CHUNK_OP_RECOVER = 6; // Restore clone chunk + CHUNK_OP_PASTE = 7; // paste chunk internal request CHUNK_OP_UNKNOWN = 8; // unknown Op CHUNK_OP_SCAN = 9; // scan oprequest }; -// read/write 的实际数据在 rpc 的 attachment 中 +// The actual data of read/write is in the attachment of rpc message ChunkRequest { required CHUNK_OP_TYPE opType = 1; // for all - required uint32 logicPoolId = 2; // for all // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 2; // for all // logicPoolId is actually uint16, but proto does not 
have uint16 required uint32 copysetId = 3; // for all required uint64 chunkId = 4; // for all optional uint64 appliedIndex = 5; // for read optional uint32 offset = 6; // for read/write - optional uint32 size = 7; // for read/write/clone 读取数据大小/写入数据大小/创建快照请求中表示请求创建的chunk大小 + optional uint32 size = 7; // for read/write/clone Read data size/Write data size/Create snapshot request represents the chunk size of the request creation optional QosRequestParas deltaRho = 8; // for read/write - optional uint64 sn = 9; // for write/read snapshot 写请求中表示文件当前版本号,读快照请求中表示请求的chunk的版本号 - optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn 用于修改chunk的correctedSn + optional uint64 sn = 9; // for write/read snapshot, in the write request, represents the current version number of the file, and in the read snapshot request, represents the version number of the requested chunk + optional uint64 correctedSn = 10; // for CreateCloneChunk/DeleteChunkSnapshotOrCorrectedSn used to modify the correctedSn of a chunk optional string location = 11; // for CreateCloneChunk optional string cloneFileSource = 12; // for write/read optional uint64 cloneFileOffset = 13; // for write/read @@ -72,28 +72,28 @@ message ChunkRequest { }; enum CHUNK_OP_STATUS { - CHUNK_OP_STATUS_SUCCESS = 0; // 成功 - CHUNK_OP_STATUS_REDIRECTED = 1; // 不是 leader,重定向 - CHUNK_OP_STATUS_DISK_FAIL = 2; // 磁盘返回错误 - CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC 校验失败 - CHUNK_OP_STATUS_INVALID_REQUEST = 4; // 请求参数不对 - CHUNK_OP_STATUS_NOSPACE = 5; // 空间不够 - CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // copyset 不存在 - CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // chunk或其快照文件不存在 - CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // 其他错误 - CHUNK_OP_STATUS_OVERLOAD = 9; // 过载,表示服务端有过多请求未处理返回 - CHUNK_OP_STATUS_BACKWARD = 10; // 请求的版本落后当前chunk的版本 - CHUNK_OP_STATUS_CHUNK_EXIST = 11; // chunk已存在 + CHUNK_OP_STATUS_SUCCESS = 0; // Success + CHUNK_OP_STATUS_REDIRECTED = 1; // Not a leader, redirect + CHUNK_OP_STATUS_DISK_FAIL = 2; // Disk returned error + CHUNK_OP_STATUS_CRC_FAIL = 3; // CRC verification failed + CHUNK_OP_STATUS_INVALID_REQUEST = 4; // The request parameters are incorrect + CHUNK_OP_STATUS_NOSPACE = 5; // Insufficient space + CHUNK_OP_STATUS_COPYSET_NOTEXIST = 6; // Copyset does not exist + CHUNK_OP_STATUS_CHUNK_NOTEXIST = 7; // Chunk or its snapshot file does not exist + CHUNK_OP_STATUS_FAILURE_UNKNOWN = 8; // Other errors + CHUNK_OP_STATUS_OVERLOAD = 9; // Overload indicates that the server has too many requests that have not been processed and returned + CHUNK_OP_STATUS_BACKWARD = 10; // The requested version falls behind the current chunk version + CHUNK_OP_STATUS_CHUNK_EXIST = 11; // Chunk already exists CHUNK_OP_STATUS_EPOCH_TOO_OLD = 12; // request epoch too old }; message ChunkResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader - optional uint64 appliedIndex = 3; // 返回当前最新的 committedIndex, 注意 read 和 write 都要返回 + optional string redirect = 2; // Not the leader, redirect to the leader + optional uint64 appliedIndex = 3; // Return the latest committedIndex, note that both read and write must be returned optional QosResponseParas phaseCost = 4; // for read/write - optional uint64 chunkSn = 5; // for GetChunkInfo 表示chunk文件版本号,0表示不存在 - optional uint64 snapSn = 6; // for GetChunkInfo 表示chunk文件快照的版本号,0表示不存在 + optional uint64 chunkSn = 5; // for GetChunkInfo represents the version number of the chunk file, while 0 indicates that it does not exist + optional uint64 snapSn = 6; // for GetChunkInfo 
represents the version number of the chunk file snapshot, while 0 indicates that it does not exist }; message GetChunkInfoRequest { @@ -104,8 +104,8 @@ message GetChunkInfoResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader - repeated uint64 chunkSn = 3; // chunk 版本号 和 snapshot 版本号 + optional string redirect = 2; // Not the leader, redirect to the leader + repeated uint64 chunkSn = 3; // Chunk version number and snapshot version number }; message GetChunkHashRequest { @@ -118,7 +118,7 @@ message GetChunkHashResponse { required CHUNK_OP_STATUS status = 1; - optional string hash = 2; // 能标志chunk数据状态的hash值,一般是crc32c + optional string hash = 2; // A hash value that reflects the state of the chunk data, usually crc32c }; message CreateS3CloneChunkRequest { @@ -131,7 +131,7 @@ message CreateS3CloneChunkResponse { required CHUNK_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader + optional string redirect = 2; // Not the leader, redirect to the leader }; message UpdateEpochRequest { diff --git a/proto/cli.proto b/proto/cli.proto index 46981c967d..5a0bdd89ff 100755 --- a/proto/cli.proto +++ b/proto/cli.proto @@ -20,12 +20,12 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli"; -// 这里都用 logicPoolId, copysetId,进入 rpc service 之后,会转换成 string -// 类型的 groupId,在传给 raft +// logicPoolId and copysetId are used here. After entering the rpc service, they are converted +// into a string-typed groupId, which is then passed to raft // | groupId | // | logicPoolId | copysetId | message AddPeerRequest { - required uint32 logicPoolId = 1; // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + required uint32 logicPoolId = 1; // logicPoolId is actually uint16, but proto does not have uint16 required uint32 copysetId = 2; required string leader_id = 3; required string peer_id = 4; diff --git a/proto/cli2.proto b/proto/cli2.proto index 76416f7a9f..b41d00c322 100755 --- a/proto/cli2.proto +++ b/proto/cli2.proto @@ -23,17 +23,17 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/cli2"; -// cli.proto 供老的使用保证 +// cli.proto is kept to guarantee compatibility for legacy users message AddPeerRequest2 { - required uint32 logicPoolId = 1; // 逻辑池id - required uint32 copysetId = 2; // 复制组id + required uint32 logicPoolId = 1; // Logical pool ID + required uint32 copysetId = 2; // Copyset ID required common.Peer leader = 3; // leader - required common.Peer addPeer = 4; // 新增peer + required common.Peer addPeer = 4; // Peer to be added } message AddPeerResponse2 { - repeated common.Peer oldPeers = 1; // 老配置 - repeated common.Peer newPeers = 2; // 新配置 + repeated common.Peer oldPeers = 1; // Old configuration + repeated common.Peer newPeers = 2; // New configuration } message RemovePeerRequest2 { @@ -87,11 +87,11 @@ message SnapshotAllResponse { message GetLeaderRequest2 { required uint32 logicPoolId = 1; required uint32 copysetId = 2; - optional common.Peer peer = 3; // 可以不指定peer查leader + optional common.Peer peer = 3; // The leader can be queried without specifying a peer } message GetLeaderResponse2 { - required common.Peer leader = 1; // 通过peer判空来判断是否返回leader + required common.Peer leader = 1; // Whether a leader is returned is indicated by whether the peer field is null } message ResetPeerRequest2 { diff --git a/proto/common.proto b/proto/common.proto index 3cae9f9e65..0dc409b609 100644 ---
a/proto/common.proto +++ b/proto/common.proto @@ -21,13 +21,13 @@ package curve.common; option cc_generic_services = true; option go_package = "proto/common"; -// 1. braft场景: id不使用,address为braft里面的PeerId,格式为{ip}:{port}:{index} -// 2. curve-raft场景:id是peer id,address为{ip}:{port} -// 当前chunkserver id就是peer id +// 1. In the braft scenario: 'id' is not used, and 'address' is the PeerId within braft, in the format {ip}:{port}:{index}. +// 2. In the curve-raft scenario: 'id' represents the peer id, and 'address' is in the format {ip}:{port}. +// The current chunkserver id is the peer id. message Peer { - optional uint64 id = 1; // peer id,全局唯一 -// optional bool isLearner = 2; // 是否是learner (暂时不支持) - optional string address = 3; // peer的地址信息 + optional uint64 id = 1; // Peer ID, globally unique +// optional bool isLearner = 2; // Whether it is a learner (not supported for now) + optional string address = 3; // Address information of the peer } message CopysetInfo { diff --git a/proto/copyset.proto b/proto/copyset.proto index fe3d271d53..10aab0485c 100755 --- a/proto/copyset.proto +++ b/proto/copyset.proto @@ -23,7 +23,7 @@ package curve.chunkserver; option cc_generic_services = true; option go_package = "proto/copyset"; -// copyset epoch message,用于epoch序列化和反序列化 +// copyset epoch message for epoch serialization and deserialization message ConfEpoch { required uint32 logicPoolId = 1; required uint32 copysetId = 2; @@ -32,15 +32,15 @@ message ConfEpoch { } message CopysetRequest { - // logicPoolId 实际上 uint16,但是 proto 没有 uint16 + // logicPoolId is actually uint16, but proto does not have uint16 required uint32 logicPoolId = 1; required uint32 copysetId = 2; - repeated string peerid = 3; // 当前复制组配置,可以为空 + repeated string peerid = 3; // The current replication group configuration can be empty }; enum COPYSET_OP_STATUS { COPYSET_OP_STATUS_SUCCESS = 0; - COPYSET_OP_STATUS_EXIST = 1; // copyset node 已经存在 + COPYSET_OP_STATUS_EXIST = 1; // copyset node already exists COPYSET_OP_STATUS_COPYSET_NOTEXIST = 2; COPYSET_OP_STATUS_FAILURE_UNKNOWN = 3; COPYSET_OP_STATUS_COPYSET_IS_HEALTHY = 4; @@ -48,7 +48,7 @@ enum COPYSET_OP_STATUS { message CopysetResponse { optional COPYSET_OP_STATUS status = 1; - optional string redirect = 2; // 自己不是 leader,重定向给 leader + optional string redirect = 2; // If not the leader, redirect to the leader. 
}; message Copyset { @@ -69,27 +69,27 @@ message CopysetStatusRequest { required uint32 logicPoolId = 1; required uint32 copysetId = 2; required common.Peer peer = 3; - required bool queryHash = 4; // 考虑到计算copyset hash值是一个非常耗时的操作,所以设置一个bool变量可以选择不查 + required bool queryHash = 4; // Considering that calculating the copyset hash value is a very time-consuming operation, setting a bool variable can choose not to check } -// 大部分字段只能是optional,因为copyset node可能不存在 +// Most fields can only be optional, as the copyset node may not exist message CopysetStatusResponse { - required COPYSET_OP_STATUS status = 1; // op状态 - optional uint32 state = 2; // copyset状态 + required COPYSET_OP_STATUS status = 1; // OP status + optional uint32 state = 2; // Copyset status optional common.Peer peer = 3; // peer optional common.Peer leader = 4; // leader - optional bool readOnly = 5; // 是否只读 - optional int64 term = 6; // 当前任期 - optional int64 committedIndex = 7; // 当前的committed index - optional int64 knownAppliedIndex = 8; // 当前copyset已知的applied index,当前peer可能未apply - optional int64 pendingIndex = 9; // 当前副本未决的op log index起始index - optional int64 pendingQueueSize = 10; // 当前副本未决的op log queue的长度 - optional int64 applyingIndex = 11; // 当前副本正在apply的op log index - optional int64 firstIndex = 12; // 当前副本第一条op log index(包括盘和memory) - optional int64 lastIndex = 13; // 当前副本最后一条op log index(包括盘和memory) - optional int64 diskIndex = 14; // 当前副本已经持久化的最大op log index(不包含memory) - optional uint64 epoch = 15; // 当前copyset配置版本 - optional string hash = 16; // 当前copyset的数据hash值 + optional bool readOnly = 5; // Read Only + optional int64 term = 6; // Current term of office + optional int64 committedIndex = 7; // Current committed index + optional int64 knownAppliedIndex = 8; // The current copyset has a known applied index, but the current peer may not have applied it + optional int64 pendingIndex = 9; // The open op log index starting index for the current replica + optional int64 pendingQueueSize = 10; // The length of the pending op log queue for the current replica + optional int64 applyingIndex = 11; // The current copy is applying the op log index + optional int64 firstIndex = 12; // The first op log index of the current replica (including disk and memory) + optional int64 lastIndex = 13; // The last op log index of the current replica (including disk and memory) + optional int64 diskIndex = 14; // The maximum op log index that the current replica has persisted (excluding memory) + optional uint64 epoch = 15; // Current copyset configuration version + optional string hash = 16; // The data hash value of the current copyset } service CopysetService { diff --git a/proto/heartbeat.proto b/proto/heartbeat.proto index d54723dfb8..292331defa 100644 --- a/proto/heartbeat.proto +++ b/proto/heartbeat.proto @@ -33,13 +33,13 @@ message CopySetInfo { required uint32 copysetId = 2; // copyset replicas, IP:PORT:ID, e.g. 127.0.0.1:8200:0 repeated common.Peer peers = 3; - // epoch, 用来标记配置变更,每变更一次,epoch会增加 + // epoch is used to mark configuration changes. 
Each time a change is made, the epoch increases required uint64 epoch = 4; - // 该复制组的leader + // The leader of this replication group required common.Peer leaderPeer = 5; - // 配置变更相关信息 + // Information about the ongoing configuration change optional ConfigChangeInfo configChangeInfo = 6; - // copyset的性能信息 + // Performance statistics of the copyset optional CopysetStatistics stats = 7; // whether the current copyset is on scaning optional bool scaning = 8; @@ -51,11 +51,11 @@ message ConfigChangeInfo { required common.Peer peer = 1; - // 配置变更的类型 + // Type of the configuration change required ConfigChangeType type = 2; - // 配置变更是否成功 + // Whether the configuration change was successful required bool finished = 3; - // 变更的error信息 + // Error information of the change optional CandidateError err = 4; }; @@ -81,13 +81,13 @@ message ChunkServerStatisticInfo { required uint32 writeRate = 2; required uint32 readIOPS = 3; required uint32 writeIOPS = 4; - // 已使用的chunk占用的磁盘空间 + // Disk space occupied by used chunks required uint64 chunkSizeUsedBytes = 5; - // chunkfilepool中未使用的chunk占用的磁盘空间 + // Disk space occupied by unused chunks in chunkfilepool required uint64 chunkSizeLeftBytes = 6; - // 回收站中chunk占用的磁盘空间 + // Disk space occupied by chunks in the recycle bin required uint64 chunkSizeTrashedBytes = 7; - // chunkfilepool的大小 + // The size of chunkfilepool optional uint64 chunkFilepoolSize = 8; }; @@ -100,27 +100,27 @@ message ChunkServerHeartbeatRequest { required DiskState diskState = 6; required uint64 diskCapacity = 7; required uint64 diskUsed = 8; - // 返回该chunk上所有copyset的信息 + // Information about all copysets on this chunkserver repeated CopySetInfo copysetInfos = 9; - // 时间窗口内该chunkserver上leader的个数 + // The number of leaders on this chunkserver within the time window required uint32 leaderCount = 10; - // 时间窗口内该chunkserver上copyset的个数 + // The number of copysets on this chunkserver within the time window required uint32 copysetCount = 11; - // chunkServer相关的统计信息 + // Statistics related to this chunkserver optional ChunkServerStatisticInfo stats = 12; optional string version = 13; }; enum ConfigChangeType { - // 配置变更命令: leader转换 + // Configuration change command: transfer leader TRANSFER_LEADER = 1; - // 配置变更命令: 复制组增加一个成员 + // Configuration change command: add a member to the replication group ADD_PEER = 2; - // 配置变更命令: 复制组删除一个成员 + // Configuration change command: remove a member from the replication group REMOVE_PEER = 3; - // 配置变更命令: 没有配置变更 + // Configuration change command: no configuration change NONE = 4; - // 配置变更命令:change复制组一个成员 + // Configuration change command: change a member of the replication group CHANGE_PEER = 5; // start scan on the peer START_SCAN_PEER = 6; @@ -134,40 +134,40 @@ message CopySetConf { repeated common.Peer peers = 3; required uint64 epoch = 4; optional ConfigChangeType type = 5; - // configchangeItem 是目标节点 - // 对于TRANSFER_LEADER: 表示目标节点; 对于ADD_PEER: 表示待加入节点 - // 对于REMOVE_PEER: 表示待删除节点; 对于CHANGE_PEER: 表示待加入节点 + // configchangeItem is the target node + // For TRANSFER_LEADER: the target node; for ADD_PEER: the node to be added + // For REMOVE_PEER: the node to be removed; for CHANGE_PEER: the node to be added // SCAN_PEER: to scan the node optional common.Peer configchangeItem = 6; - // oldPeer, 这个只在ConfigChangeType=对于CHANGE_PEER的情况下会赋值, - // 表示待删除节点。 - // chunkserver收到CHANGE_PEER,根据peers,configchangeItem,oldPeer拼出新的conf + // oldPeer is only assigned when ConfigChangeType is CHANGE_PEER, + // and it represents the node to be removed. + // When the chunkserver receives CHANGE_PEER, it assembles the new conf from peers, configchangeItem and oldPeer optional common.Peer oldPeer = 7; }; enum HeartbeatStatusCode { - // 正常返回 + // Normal return hbOK = 0; - // 必要的参数为初始化 + // Required parameters are uninitialized hbParamUnInitialized = 1; - // chunkserver不在topology中 + // The chunkserver is not in the topology hbChunkserverUnknown = 2; - // chunkserver状态为retired + // The chunkserver status is retired hbChunkserverRetired = 3; - // chunkserver的ip和port与topology中的不匹配 + // The IP and port of the chunkserver do not match those in the topology hbChunkserverIpPortNotMatch = 4; - // chunkserver的token不匹配 + // The chunkserver token does not match hbChunkserverTokenNotMatch = 5; - // 无copyset上报 + // No copyset reported hbRequestNoCopyset = 6; - // copyset转换为topology格式失败 + // Failed to convert the copyset to the topology format hbAnalyseCopysetError = 7; } message ChunkServerHeartbeatResponse { - // 返回需要进行变更的copyset的信息 + // Returns the copysets that need to be changed repeated CopySetConf needUpdateCopysets = 1; - // 错误码 + // Error code optional HeartbeatStatusCode statusCode = 2; }; diff --git a/proto/nameserver2.proto b/proto/nameserver2.proto index 85947d96ad..57b8a80c3d 100644 --- a/proto/nameserver2.proto +++ b/proto/nameserver2.proto @@ -31,18 +31,18 @@ enum FileType { }; enum FileStatus { - // 文件创建完成 + // File creation completed kFileCreated = 0; - // 文件删除中 + // The file is being deleted kFileDeleting = 1; - // 文件正在克隆 + // The file is cloning kFileCloning = 2; - // 文件元数据安装完毕 + // File metadata installation completed kFileCloneMetaInstalled = 3; - // 文件克隆完成 + // File cloning completed kFileCloned = 4; - // 文件正在被克隆 + // The file is being cloned kFileBeingCloned = 5; } @@ -78,15 +78,15 @@ message FileInfo { optional uint64 ctime = 9; optional uint64 seqNum = 10; optional FileStatus fileStatus = 11; - //用于文件转移到回收站的情况下恢复场景下的使用, - //RecycleBin(回收站)目录下使用/其他场景下不使用 + // Used for recovery when the file has been moved to the recycle bin; + // only used under the RecycleBin directory, not in other scenarios optional string originalFullPathName = 12; - // cloneSource 当前用于存放克隆源(当前主要用于curvefs) - // 后期可以考虑存放 s3相关信息 + // cloneSource currently stores the clone source (mainly used by curvefs) + // Storing s3-related information may be considered later optional string cloneSource = 13; - // cloneLength 克隆源文件的长度,用于clone过程中进行extent + // cloneLength is the length of the clone source file, used to extend the file during the clone process optional uint64 cloneLength = 14; optional uint64 stripeUnit = 15; optional uint64 stripeCount = 16; @@ -99,68 +99,68 @@ // status code enum StatusCode { - // 执行成功 + // Execution successful kOK = 0; - // 文件已存在 + // File already exists kFileExists = 101; - // 文件不存在 + // File does not exist kFileNotExists = 102; - // 非目录类型 + // Not a directory kNotDirectory = 103; - // 传入参数错误 + // Invalid input parameters kParaError = 104; - // 缩小文件,目前不支持缩小文件 + // Shrinking a file is currently not supported kShrinkBiggerFile = 105; - // 扩容单位错误,非segment size整数倍 + // Invalid extend unit, not an integer multiple of the segment size kExtentUnitError = 106; - // segment未分配 + // Segment not allocated kSegmentNotAllocated = 107; - // segment分配失败 + // Segment allocation failed kSegmentAllocateError = 108; - // 目录不存在 + // Directory does not exist kDirNotExist = 109; - // 功能不支持 + // Function not supported kNotSupported = 110; - // owner认证失败 + // Owner authentication failed kOwnerAuthFail = 111; - //
目录非空 + // Directory is not empty kDirNotEmpty = 112; - // 文件已处于快照中 + // The file is already in a snapshot kFileUnderSnapShot = 120; - // 文件不在快照中 + // The file is not in the snapshot kFileNotUnderSnapShot = 121; - // 快照删除中 + // Snapshot deletion in progress kSnapshotDeleting = 122; - // 快照文件不存在 + // The snapshot file does not exist kSnapshotFileNotExists = 123; - // 快照文件删除失败 + // Snapshot file deletion failed kSnapshotFileDeleteError = 124; - // session不存在 + // Session does not exist kSessionNotExist = 125; - // 文件已被占用 + // The file is already in use kFileOccupied = 126; kCloneFileNameIllegal = 127; kCloneStatusNotMatch = 128; - // 文件删除失败 + // File deletion failed kCommonFileDeleteError = 129; - // 文件id不匹配 + // File ID mismatch kFileIdNotMatch = 130; - // 文件在删除中 + // The file is being deleted kFileUnderDeleting = 131; - // 文件长度不符合要求 + // The file length does not meet the requirements kFileLengthNotSupported = 132; - // 文件正在被克隆 + // The file is being cloned kDeleteFileBeingCloned = 133; - // client版本不匹配 + // Client version mismatch kClientVersionNotMatch = 134; - // snapshot功能禁用中 + // The snapshot function is disabled kSnapshotFrozen = 135; - // 快照克隆服务连不上 + // The snapshot clone service cannot be connected kSnapshotCloneConnectFail = 136; - // 快照克隆服务未初始化 + // The snapshot clone service is not initialized kSnapshotCloneServerNotInit = 137; // recover file status is CloneMetaInstalled kRecoverFileCloneMetaInstalled = 138; @@ -170,9 +170,9 @@ enum StatusCode { kEpochTooOld = 140; // poolset doesn't exist kPoolsetNotExist = 141; - // 元数据存储错误 + // Metadata storage error kStorageError = 501; - // 内部错误 + // Internal error KInternalError = 502; }; @@ -311,20 +311,20 @@ message ExtendFileResponse { } message ChangeOwnerRequest { - // 需要变更owner的文件的fileName + // Need to change the fileName of the owner's file required string fileName = 1; - // 希望文件owner变更后的新的owner + // Hope the new owner after the file owner changes required string newOwner = 2; - // ChangerOwner接口只能通过root权限进行调用,需要传入root权限的owner + // The ChangerOwner interface can only be called with root permission, and an owner with root permission needs to be passed in required string rootOwner = 3; - // 对root身份进行校验的的signature + // The signature for verifying the root identity required string signature = 4; - // 用来在mds端重新计算signature + // Used to recalculate the signature on the mds side required uint64 date = 5; } -// 返回ChangeOwner的执行结果,成功返回statusCode::kOK -// 失败可能返回kFileNotExists、kOwnerAuthFail、kFileOccupied、kStorageError等,可能返回的错误码将来继续补充 +// Returns the execution result of ChangeOwner, successfully returning statusCode::kOK +// Failure may return kFileNotExists, kOwnerAuthFail, kFileOccupied, kStorageError, etc. 
The error codes that may be returned will continue to be supplemented in the future message ChangeOwnerResponse { required StatusCode statusCode = 1; } @@ -395,8 +395,8 @@ message CheckSnapShotStatusRequest { required uint64 date = 5; } -// statusCode为kOK时,fileStatus和progress才会赋值 -// 只有fileStatus是kFileDeleting时,progress表示快照文件删除进度,否则progress返回0 +// FileStatus and progress are only assigned values when statusCode is kOK +// Only when fileStatus is kFileDeleting, progress represents the progress of snapshot file deletion, otherwise progress returns 0 message CheckSnapShotStatusResponse { required StatusCode statusCode = 1; optional FileStatus fileStatus = 2; @@ -431,7 +431,7 @@ message OpenFileRequest { optional string clientVersion = 5; }; -// statusCode返回值,详见StatusCode定义: +// The return value of statusCode is detailed in the definition of StatusCode: // StatusCode::kOK // StatusCode::kFileNotExists // StatusCode::kStorageError @@ -456,7 +456,7 @@ message CloseFileRequest { optional uint32 clientPort = 7; }; -// statusCode返回值,详见StatusCode定义: +// The return value of statusCode is detailed in the definition of StatusCode: // StatusCode::kOK // StatusCode::kFileNotExists // StatusCode::kStorageError @@ -478,7 +478,7 @@ message ReFreshSessionRequest { optional uint32 clientPort = 8; } -// statusCode返回值,详见StatusCode定义: +// The return value of statusCode is detailed in the definition of StatusCode: // StatusCode::kOK // StatusCode::kFileNotExists // StatusCode::kStorageError @@ -531,9 +531,9 @@ message GetAllocatedSizeRequest { message GetAllocatedSizeResponse { required StatusCode statusCode = 1; - // 文件或目录的分配大小 + // Allocation size of files or directories optional uint64 allocatedSize = 2; - // key是逻辑池id,value是分配大小 + // Key is the logical pool id, and value is the allocation size map allocSizeMap = 3; } @@ -543,7 +543,7 @@ message GetFileSizeRequest { message GetFileSizeResponse { required StatusCode statusCode = 1; - // 文件或目录的file length + // The file length of a file or directory optional uint64 fileSize = 2; } diff --git a/proto/schedule.proto b/proto/schedule.proto index 2dde693556..9c92bb4ef5 100644 --- a/proto/schedule.proto +++ b/proto/schedule.proto @@ -34,7 +34,7 @@ message RapidLeaderScheduleResponse { required sint32 statusCode = 1; } -// 如果chunkServerID为空,则返回所有chunkserver的恢复状态 +// If chunkServerID is empty, return the recovery status of all chunkservers message QueryChunkServerRecoverStatusRequest { repeated uint32 chunkServerID = 1; } diff --git a/proto/topology.proto b/proto/topology.proto index 2057cafe2a..9e002f2c3d 100644 --- a/proto/topology.proto +++ b/proto/topology.proto @@ -415,7 +415,7 @@ message CreateLogicalPoolRequest { required LogicalPoolType type = 4; required bytes redundanceAndPlaceMentPolicy = 5; //json body required bytes userPolicy = 6; //json body - optional uint32 scatterWidth = 7; //生成copyset依据的scatterWidth平均值 + optional uint32 scatterWidth = 7; // Generate copyset based on the average scatterWidth value optional AllocateStatus status = 8; } diff --git a/robot/Resources/keywords/deploy.py b/robot/Resources/keywords/deploy.py index 93d7926a45..0152bdafb1 100644 --- a/robot/Resources/keywords/deploy.py +++ b/robot/Resources/keywords/deploy.py @@ -77,7 +77,7 @@ def add_config(): ori_cmd = R"sed -i 's/mds.listen.addr=127.0.0.1:6666/mds.listen.addr=%s/g' client.conf"%(addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0,"change host %s client config fail"%host -#将client.conf配置成py_client.conf(主机用),方便client复现死锁问题 + # Configure client.conf to 
py_client.conf(for the host) to facilitate client replication of deadlock issues ori_cmd = "sudo mv client.conf /etc/curve/" rs = shell_operator.ssh_exec(ssh, ori_cmd) ori_cmd = "sudo cp /etc/curve/client.conf /etc/curve/py_client.conf" @@ -157,11 +157,11 @@ def add_config(): ori_cmd = "sed -i \"s/client.config_path=\S*/client.config_path=\/etc\/curve\/snap_client.conf/\" snapshot_clone_server.conf" rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改snapshot_clone_server.conf etcd配置 + #Modify snapshot_clone_server.conf etcd configuration ori_cmd = "sed -i \"s/etcd.endpoint=\S*/etcd.endpoint=%s/g\" snapshot_clone_server.conf"%(etcd_addrs) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0,"change host %s snapshot config fail"%host - #修改数据库配置项 + # Modifying Database Configuration Items ori_cmd = R"sed -i 's/metastore.db_address=\S*/metastore.db_address=%s/g' snapshot_clone_server.conf"%(config.abnormal_db_host) rs = shell_operator.ssh_exec(ssh, ori_cmd) assert rs[3] == 0,"change host %s snapshot clone server config fail"%host diff --git a/robot/Resources/keywords/fault_inject.py b/robot/Resources/keywords/fault_inject.py index 48e95382c4..151c695b8b 100644 --- a/robot/Resources/keywords/fault_inject.py +++ b/robot/Resources/keywords/fault_inject.py @@ -1796,7 +1796,7 @@ def test_mds_clock_offset(offset): inject_clock_offset(ssh,offset) return ssh -#使用cycle会从掉电到上电有1秒钟的间隔 +# There is a 1-second interval from power down to power up when using cycle def test_ipmitool_restart_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) logger.info("|------begin test chunkserver ipmitool cycle,host %s------|"%(chunkserver_host)) @@ -1832,7 +1832,7 @@ def test_ipmitool_restart_client(): time.sleep(5) assert status,"restart host %s fail"%client_host -#使用reset从掉电到上电没有间隔 +# There is no interval between power-off and power-on when using reset def test_ipmitool_reset_chunkserver(): chunkserver_host = random.choice(config.chunkserver_reset_list) logger.info("|------begin test chunkserver ipmitool reset,host %s------|"%(chunkserver_host)) @@ -2103,10 +2103,10 @@ def clean_curve_data(): def do_thrasher(action): #start level1 if type(action) is types.StringType: - logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX"%action) + logger.debug("Startup FailureXXXXXXXXXXXXXXXXXXX %s XXXXXXXXXXXXXXXXXXXXXXXXX"%action) globals()[action]() else: - logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX"%(action[0],str(action[1]))) + logger.debug("Startup FailureXXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX"%(action[0],str(action[1]))) globals()[action[0]](action[1]) def start_retired_and_down_chunkservers(): diff --git a/robot/Resources/keywords/snapshot_operate.py b/robot/Resources/keywords/snapshot_operate.py index f21c2be296..8ba133c5fd 100644 --- a/robot/Resources/keywords/snapshot_operate.py +++ b/robot/Resources/keywords/snapshot_operate.py @@ -107,8 +107,8 @@ def snapshot_create_with_empty_str_file(file_name=" ", user_name=config.user_nam return rc -# "特殊字符`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" -# "特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[];',./ ~!@#$%^&*()_+{}|:\"<>?" +# "Special Characters`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?" 
def snapshot_create_with_special_file_name(file_name="/特殊 字符`-=[]\\;',./ ~!@#$%^&*()_+{}|:\"<>?", user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() @@ -131,7 +131,7 @@ def get_sanpshot_info(seq): return finfo -# 创建并获取快照文件信息 +# Create and obtain snapshot file information def create_snapshot_and_get_snapshot_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): client = snapshot_client.CurveSnapshot() @@ -141,7 +141,7 @@ def create_snapshot_and_get_snapshot_info(file_name=config.snapshot_file_name, u return finfo -# 正常获取快照文件分配信息 +# Obtain snapshot file allocation information normally def get_normal_snapshot_segment_info(file_name=config.snapshot_file_name, user_name=config.user_name, password=config.pass_word): seq = snapshot_normal_create(file_name, user_name, password) @@ -159,7 +159,7 @@ def get_normal_chunk_info(file_name=config.snapshot_file_name, user_name=config. client = snapshot_client.CurveSnapshot() chunkinfo = client.get_chunk_Info(seginfo.chunkvec[0]) logger.info("get_normal_chunkInfo chunkInfo info = %s" % chunkinfo) - return chunkinfo # 可以对chunInfo.chunkSn进行断言验证 + return chunkinfo # Can perform assertion validation on chunInfo.chunkSn def get_chunk_info_with_chunk_id_info(idinfo): diff --git a/robot/curve_choas.txt b/robot/curve_choas.txt index ff39c335e5..0f9b389152 100644 --- a/robot/curve_choas.txt +++ b/robot/curve_choas.txt @@ -37,7 +37,7 @@ test one volume perf stop rwio perf test -#启动大压力情况下的混沌测试:分等级进行随机故障注入。每次注入完成后恢复集群所有业务,目前设置100次的全流程注入 +# Conduct chaos testing under high stress: Inject faults of various levels randomly. Restore all cluster operations after each injection. Currently set for 100 rounds of full injection inject cluster chaos test [Tags] P2 chaos longtime @@ -47,17 +47,17 @@ inject cluster chaos test ${num} evaluate int(10) init create curve vm ${num} :FOR ${i} IN RANGE 10 - log "启动第"${i}"轮故障" + log "Starting Round "${i}" of Fault Injection" ${choas1} evaluate random.choice($choas_level1) random - log "开始启动一级故障" + log "Starting Level 1 Fault" do thrasher ${choas1} sleep 30 ${choas2} evaluate random.choice($choas_level2) random - log "开始启动二级故障" + log "Starting Level 2 Fault" do thrasher ${choas2} sleep 30 ${choas3} evaluate random.choice($choas_level3) random - log "开始启动三级故障" + log "Starting Level 3 Fault" do thrasher ${choas3} sleep 30 clean env diff --git a/robot/curve_robot.txt b/robot/curve_robot.txt index 8709a96b6e..9f49ca2caa 100644 --- a/robot/curve_robot.txt +++ b/robot/curve_robot.txt @@ -1628,7 +1628,7 @@ test kill chunkserver one check loop read ${new_fd} [Teardown] file clean ${new_fd} -# create snapshot 相关用例 +# Create snapshot related use cases create snapshot with notexist file [Tags] P0 base first release test-snapshot @@ -1698,7 +1698,7 @@ create snapshot with nomal file and check first chunk snapshot [Teardown] delete curve file for shanpshot -# 创建文件->写文件->创建快照->修改文件->读快照验证(修改前数据)->删除重新快照->验证快照数据(修改后数据) +# Create file ->Write file ->Create snapshot ->Modify file ->Read snapshot verification (data before modification) ->Delete re snapshot ->Verify snapshot data (data after modification) create snapshot and check chunk snapshot after cow [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -1744,7 +1744,7 @@ create snapshot repeat should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsnapshot info 用例 +# Getsnapshot info use case get empty file snapshot info [Tags] P0 base first release 
test-snapshot @@ -1871,7 +1871,7 @@ delete snapshoting curve file should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# getsegmentinfo 相关用例 +# Use cases related to getsegmentinfo check snapshot segmentinfo after modify file [Tags] P0 base first release test-snapshot @@ -1981,7 +1981,7 @@ get empty file snapshot segmentinfo should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# snapshot chunkinfo 用例验证 +# Snapshot chunkinfo use case validation check empty file snapshot chunkinfo after modify file [Tags] P0 base first release test-snapshot @@ -2038,10 +2038,10 @@ get snapshot chunkinfo with notexist chunidinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} - #TODO: 此处需要判断错误,当前是死循环,不停轮询查询id信息 + # TODO: An error needs to be determined here. Currently, it is a dead loop and constantly polls for ID information ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2063,7 +2063,7 @@ check snapshot chunkinfo after delete snapshot ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} - # 此处应该再重新获取下segmentinfo, chunkvec[0]应该不存在 + # We should retrieve segmentinfo again here, chunkvec [0] should not exist ${chunkinfo} get chunk info with chunk id info ${seginfo.chunkvec[0]} should be equal ${chunkinfo.snSize.value} ${expect_size} should be equal ${chunkinfo.chunkSn[0]} ${expect_first_sn} @@ -2071,7 +2071,7 @@ check snapshot chunkinfo after delete snapshot [Teardown] delete curve file for shanpshot -# read snapshot chunk 用例 CLDCFS-1249 +# Read snapshot chunk use case CLDCFS-1249 read snapshot chunk with notexist idinfo [Tags] P0 base first release no-need @@ -2081,10 +2081,10 @@ read snapshot chunk with notexist idinfo write curve file for snapshot ${seq} snapshot normal create ${seginfo} get snapshot first segment info ${seq} - # 修改chunkidinfo + # Modify chunkidinfo ${seginfo.chunkvec[0].cpid_.value} evaluate int(66) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # TODO:当前客户端死循环打印错误,此处校验结果应该返回错误 + # TODO: The current client has a loop printing error, and the verification result should return an error here ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2101,7 +2101,7 @@ read snapshot chunk with error seq ${seginfo} get snapshot first segment info ${seq} ${seq.value} evaluate int(8) ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处校验结果应该返回错误 + # The verification result should return an error here ${expect_rst} evaluate int(-6) should be equal ${content} ${expect_rst} ${seq.value} evaluate int(1) @@ -2110,7 +2110,7 @@ read snapshot chunk with error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 查询快照状态用例 +# Query snapshot status use case check empty file snapshot status [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2197,7 +2197,7 @@ check snapshot status use error seq [Teardown] delete curve file for shanpshot -# 删除快照相关用例 +# Delete snapshot related use cases repeat delete snapshot [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2230,7 +2230,7 @@ delete snapshot use error seq should be equal ${rs} ${expect_rc} [Teardown] delete curve file for shanpshot -# 删除chunk快照(当前无限重试,需要调用方设置重试次数) CLDCFS-1254 +# Delete 
chunk snapshot (currently infinite retries, caller needs to set retry count) CLDCFS-1254 delete chunk snapshot with snapshot seq [Tags] P0 base first release no-need ${rc} create curve file for snapshot @@ -2243,7 +2243,7 @@ delete chunk snapshot with snapshot seq ${rc} delete chunk snapshot with correct sn ${seginfo.chunkvec[0]} ${seq} should be equal ${rc} ${expect_rc} ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} - # 此处判断返回结果是否为错误 + # Determine here whether the returned result is an error ${rs} delete file snapshot ${seq} sleep 2 should be equal ${rs} ${expect_rc} @@ -2291,8 +2291,8 @@ repeat delete chunk snapshot [Teardown] delete curve file for shanpshot -# 创建clone&recover -# 步骤:创建文件、写文件、创建快照记录seq,触发cow,获取快照信息(版本号),createclonechunk(指定s3上对象,correctedseq=快照seq),恢复快照,验证chunk数据是否为s3数据 +# Create clone&recover +# Steps: create a file, write the file, create a snapshot and record its seq, trigger cow, obtain the snapshot info (version number), createclonechunk (specifying an object on s3, correctedseq = snapshot seq), recover the snapshot, and verify that the chunk data is the s3 data create clone and recover chunk [Tags] P0 base first release test-snapshot ${rc} create curve file for snapshot @@ -2311,7 +2311,7 @@ create clone and recover chunk should be equal ${rc} ${expect_rc} ${rc} recover chunk data ${seginfo.chunkvec[0]} should be equal ${rc} ${expect_rc} - # check数据 + # Check data ${content} read chunk snapshot ${seginfo.chunkvec[0]} ${seq} ${expect_content} evaluate str("aaaaaaaa")*512 should be equal ${content} ${expect_content} diff --git a/src/chunkserver/chunk_closure.cpp b/src/chunkserver/chunk_closure.cpp index 09b259ae7e..4cdaf8e335 100644 --- a/src/chunkserver/chunk_closure.cpp +++ b/src/chunkserver/chunk_closure.cpp @@ -28,21 +28,21 @@ namespace chunkserver { void ChunkClosure::Run() { /** - * 在Run结束之后,自动析构自己,这样可以避免 - * 析构函数漏调 + * Automatically destruct itself after Run finishes, which avoids + * missing the destructor call */ std::unique_ptr selfGuard(this); /** - * 确保done能够被调用,目的是保证rpc一定会返回 + * Ensure that done is invoked, so that the rpc is guaranteed to return */ brpc::ClosureGuard doneGuard(request_->Closure()); /** - * 尽管在request propose给copyset的之前已经 - * 对leader身份进行了确认,但是在copyset处理 - * request的时候,当前copyset的身份还是有可能 - * 变成非leader,所以需要判断ChunkClosure被调 - * 用的时候,request的status,如果 ok,说明是 - * 正常的apply处理,否则将请求转发 + * Although the leader identity was confirmed before the request was + * proposed to the copyset, the copyset may still have turned into a + * non-leader by the time it processes the request. So when ChunkClosure + * is invoked, the status of the request needs to be checked: if it is + * ok, this is a normal apply; otherwise the request + * will be forwarded */ if (status().ok()) { return; diff --git a/src/chunkserver/chunk_closure.h b/src/chunkserver/chunk_closure.h index e2d76b7174..f333b0d7b2 100755 --- a/src/chunkserver/chunk_closure.h +++ b/src/chunkserver/chunk_closure.h @@ -33,11 +33,11 @@ namespace curve { namespace chunkserver { /** - * 携带op request的所有上下文的closure,通过braft::Task传递给raft处理, - * 调用会有两个地方: - * 1.op request正常的被raft处理,最后on apply的时候会调用返回 - * 2.op request被打包给raft处理之后,但是还没有来得及处理就出错了,例如leader - * step down变为了非leader,那么会明确的提前向client返回错误 + * A closure that carries the full context of the op request and is passed to raft for processing via braft::Task, + * It is invoked in two places: + * 1. 
The op request is processed normally by the raft, and will be called and returned when it is finally applied + * 2. After the op request was packaged for raft processing, an error occurred before it could be processed, such as leader + * If the step down becomes a non leader, it will explicitly return an error to the client in advance */ class ChunkClosure : public braft::Closure { public: @@ -49,7 +49,7 @@ class ChunkClosure : public braft::Closure { void Run() override; public: - // 包含了op request 的上下文信息 + // Contains contextual information for op request std::shared_ptr request_; }; diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp index d6e9034641..b5065ccdd3 100755 --- a/src/chunkserver/chunk_service.cpp +++ b/src/chunkserver/chunk_service.cpp @@ -76,7 +76,7 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -134,7 +134,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, } } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "I/O request, op: " << request->optype() @@ -144,7 +144,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -184,7 +184,7 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 请求创建的chunk大小和copyset配置的大小不一致 + // The chunk size requested for creation does not match the size configured for copyset if (request->size() != maxChunkSize_) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "Invalid chunk size: " << request->optype() @@ -193,7 +193,7 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -244,7 +244,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -254,7 +254,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -295,7 +295,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -305,7 +305,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = 
copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -315,7 +315,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // RecoverChunk请求和ReadChunk请求共用ReadChunkRequest + // RecoverChunk request and ReadChunk request share ReadChunkRequest std::shared_ptr req = std::make_shared(nodePtr, chunkServiceOptions_.cloneManager, @@ -347,13 +347,13 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -401,7 +401,7 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -423,9 +423,9 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn( } /** - * 当前GetChunkInfo在rpc service层定义和Chunk Service分离的, - * 且其并不经过QoS或者raft一致性协议,所以这里没有让其继承 - * OpRequest或者QoSRequest来重新封装,而是直接原地处理掉了 + * Currently, GetChunkInfo is defined in the rpc service layer and separated from Chunk Service, + * And it does not go through QoS or raft consistency protocols, so it is not allowed to inherit here + * OpRequest or QoSRequest to be re encapsulated, but directly processed in place */ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, const GetChunkInfoRequest *request, @@ -449,7 +449,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); @@ -460,7 +460,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, return; } - // 检查任期和自己是不是Leader + // Check tenure and whether you are a leader if (!nodePtr->IsLeaderTerm()) { PeerId leader = nodePtr->GetLeaderId(); if (!leader.is_empty()) { @@ -476,16 +476,16 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, ret = nodePtr->GetDataStore()->GetChunkInfo(request->chunkid(), &chunkInfo); if (CSErrorCode::Success == ret) { - // 1.成功,此时chunk文件肯定存在 + // 1. Success, the chunk file must exist at this time response->add_chunksn(chunkInfo.curSn); if (chunkInfo.snapSn > 0) response->add_chunksn(chunkInfo.snapSn); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回的版本集合为空 + // 2. Chunk file does not exist, returned version set is empty response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. 
Other errors LOG(ERROR) << "get chunk info failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -504,7 +504,7 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, (void)controller; brpc::ClosureGuard doneGuard(done); - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->length())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "GetChunkHash illegal parameter:" @@ -517,7 +517,7 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); @@ -537,15 +537,15 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, &hash); if (CSErrorCode::Success == ret) { - // 1.成功 + // 1. Success response->set_hash(hash); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回0的hash值 + // 2. Chunk file does not exist, return a hash value of 0 response->set_hash("0"); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. Other errors LOG(ERROR) << "get chunk hash failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -579,7 +579,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, bool ChunkServiceImpl::CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const { - // 检查offset+len是否越界 + // Check if offset+len is out of range if (CURVE_UNLIKELY(offset + len > maxChunkSize_)) { return false; } diff --git a/src/chunkserver/chunk_service.h b/src/chunkserver/chunk_service.h index e15aea389b..ab8575e4e6 100755 --- a/src/chunkserver/chunk_service.h +++ b/src/chunkserver/chunk_service.h @@ -100,10 +100,10 @@ class ChunkServiceImpl : public ChunkService { private: /** - * 验证op request的offset和length是否越界和对齐 + * Verify whether the offset and length of the op request are within bounds and aligned * @param offset[in]: op request' offset * @param len[in]: op request' length - * @return true,说明合法,否则返回false + * @return true if the request is valid, otherwise false */ bool CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const; diff --git a/src/chunkserver/chunk_service_closure.cpp b/src/chunkserver/chunk_service_closure.cpp index d680b37d93..ccef25a3c3 100644 --- a/src/chunkserver/chunk_service_closure.cpp +++ b/src/chunkserver/chunk_service_closure.cpp @@ -30,32 +30,32 @@ namespace chunkserver { void ChunkServiceClosure::Run() { /** - * 在Run结束之后,自动析构自己,这样可以避免 - * 析构函数漏调 + * Destructs itself automatically after Run finishes, which avoids + * missing the destructor call */ std::unique_ptr selfGuard(this); { - // 所有brpcDone_调用之前要做的操作都放到这个生命周期内 + // Everything that has to be done before brpcDone_ is invoked is placed within this scope brpc::ClosureGuard doneGuard(brpcDone_); - // 记录请求处理结果,收集到metric中 + // Record the request processing results and collect them in metric OnResonse(); } - // closure调用的时候减1,closure创建的什么加1 - // 这一行必须放在brpcDone_调用之后,ut里需要测试inflightio超过限制时的表现 - // 会在传进来的closure里面加一个sleep来控制inflightio个数 + // Decrement by 1 when the closure is invoked; increment by 1 when the closure is created + // This line must come after brpcDone_ is invoked: the unit tests need to check the behavior when inflight IO exceeds the limit, + // and they do so by adding a sleep in the incoming 
closure to control the number of inflight IOs if (nullptr != inflightThrottle_) { inflightThrottle_->Decrement(); } } void ChunkServiceClosure::OnRequest() { - // 如果request或者response为空就不统计metric + // If the request or response is null, no metric is recorded if (request_ == nullptr || response_ == nullptr) return; - // 根据request类型统计请求数量 + // Count the number of requests based on their type ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { @@ -88,18 +88,18 @@ void ChunkServiceClosure::OnRequest() { } void ChunkServiceClosure::OnResonse() { - // 如果request或者response为空就不统计metric + // If the request or response is null, no metric is recorded if (request_ == nullptr || response_ == nullptr) return; - // 可以根据response中的返回值来统计此次请求的处理结果 + // The result of this request can be determined from the status code in the response ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); bool hasError = false; uint64_t latencyUs = common::TimeUtility::GetTimeofDayUs() - receivedTimeUs_; switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - // 如果是read请求,返回CHUNK_OP_STATUS_CHUNK_NOTEXIST也认为是正确的 + // For a read request, a return value of CHUNK_OP_STATUS_CHUNK_NOTEXIST is also considered correct hasError = (response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && (response_->status() diff --git a/src/chunkserver/chunk_service_closure.h b/src/chunkserver/chunk_service_closure.h index b6dc7d4d65..afa921b935 100755 --- a/src/chunkserver/chunk_service_closure.h +++ b/src/chunkserver/chunk_service_closure.h @@ -34,7 +34,7 @@ namespace curve { namespace chunkserver { -// chunk service层的闭包,对rpc的闭包再做一层封装,用于请求返回时统计metric信息 +// Closure of the chunk service layer; it wraps the rpc closure with one more layer and is used to collect metrics when the request returns class ChunkServiceClosure : public braft::Closure { public: explicit ChunkServiceClosure( @@ -47,43 +47,43 @@ class ChunkServiceClosure : public braft::Closure { , response_(response) , brpcDone_(done) , receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) { - // closure创建的什么加1,closure调用的时候减1 + // Increment by 1 when the closure is created; decrement by 1 when the closure is invoked if (nullptr != inflightThrottle_) { inflightThrottle_->Increment(); } - // 统计请求数量 + // Count the number of requests OnRequest(); } ~ChunkServiceClosure() = default; /** - * 该闭包的guard生命周期结束时会调用该函数 - * 该函数内目前主要是对读写请求返回结果的一些metric统计 - * 后面如果有类似的场景(在service请求结束时做一些处理)可以在内部添加逻辑 + * This function is called when the guard holding this closure reaches the end of its lifecycle + * Currently it mainly collects metrics on the results returned for read and write requests + * If similar needs arise later (doing some processing when a service request finishes), the logic can be added here */ void Run() override; private: /** - * 统计请求数量和速率 + * Count the number and rate of requests */ void OnRequest(); /** - * 记录请求处理的结果,例如请求是否出错、请求的延时等 + * Record the result of request processing, such as whether the request failed, the request latency, etc. */ void OnResonse(); private: - // inflight流控 + // inflight flow control std::shared_ptr inflightThrottle_; - // rpc请求的request + // Request of the rpc call const ChunkRequest *request_; - // rpc请求的response + // Response of the rpc call ChunkResponse *response_; - // rpc请求回调 + // Callback of the rpc call google::protobuf::Closure *brpcDone_; - // 接受到请求的时间 + // Time when the request was received uint64_t 
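A minimal sketch of the counting pattern described above, assuming nothing beyond an atomic counter (the real InflightThrottle may differ): the closure increments the counter when it is constructed and decrements it only after the wrapped brpc callback has run, so a unit test can keep requests "inflight" simply by sleeping inside the callback.

    #include <atomic>
    #include <cstdint>

    class InflightThrottleSketch {
     public:
        explicit InflightThrottleSketch(uint64_t maxInflight)
            : maxInflight_(maxInflight), inflight_(0) {}
        // Called when a service closure is created.
        void Increment() { inflight_.fetch_add(1, std::memory_order_acq_rel); }
        // Called only after brpcDone_ has been invoked.
        void Decrement() { inflight_.fetch_sub(1, std::memory_order_acq_rel); }
        // Used by the service to reject new requests when overloaded.
        bool IsOverLoad() const {
            return inflight_.load(std::memory_order_acquire) >= maxInflight_;
        }
     private:
        const uint64_t maxInflight_;
        std::atomic<uint64_t> inflight_;
    };
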
receivedTimeUs_; }; diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp index 07f1f48d5f..47de48fb5b 100644 --- a/src/chunkserver/chunkserver.cpp +++ b/src/chunkserver/chunkserver.cpp @@ -87,44 +87,44 @@ int ChunkServer::Run(int argc, char** argv) { RegisterCurveSegmentLogStorageOrDie(); - // ==========================加载配置项===============================// + // ==========================Load Configuration Items===============================// LOG(INFO) << "Loading Configuration."; common::Configuration conf; conf.SetConfigPath(FLAGS_conf.c_str()); - // 在从配置文件获取 + // Obtaining from the configuration file LOG_IF(FATAL, !conf.LoadConfig()) << "load chunkserver configuration fail, conf path = " << conf.GetConfigPath(); - // 命令行可以覆盖配置文件中的参数 + // The command line can override parameters in the configuration file LoadConfigFromCmdline(&conf); - // 初始化日志模块 + // Initialize Log Module google::InitGoogleLogging(argv[0]); - // 打印参数 + // Print parameters conf.PrintConfig(); conf.ExposeMetric("chunkserver_config"); curve::common::ExposeCurveVersion(); - // ============================初始化各模块==========================// + // ============================nitialize each module==========================// LOG(INFO) << "Initializing ChunkServer modules"; - // 优先初始化 metric 收集模块 + // Prioritize initializing the metric collection module ChunkServerMetricOptions metricOptions; InitMetricOptions(&conf, &metricOptions); ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); LOG_IF(FATAL, metric->Init(metricOptions) != 0) << "Failed to init chunkserver metric."; - // 初始化并发持久模块 + // Initialize concurrent persistence module ConcurrentApplyModule concurrentapply; ConcurrentApplyOption concurrentApplyOptions; InitConcurrentApplyOptions(&conf, &concurrentApplyOptions); LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions)) << "Failed to initialize concurrentapply module!"; - // 初始化本地文件系统 + // Initialize local file system std::shared_ptr fs( LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); LocalFileSystemOption lfsOption; @@ -133,7 +133,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, 0 != fs->Init(lfsOption)) << "Failed to initialize local filesystem module!"; - // 初始化chunk文件池 + // Initialize chunk file pool FilePoolOptions chunkFilePoolOptions; InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions); std::shared_ptr chunkfilePool = @@ -170,14 +170,14 @@ int ChunkServer::Run(int argc, char** argv) { } } - // 远端拷贝管理模块选项 + // Remote Copy Management Module Options CopyerOptions copyerOptions; InitCopyerOptions(&conf, ©erOptions); auto copyer = std::make_shared(); LOG_IF(FATAL, copyer->Init(copyerOptions) != 0) << "Failed to initialize clone copyer."; - // 克隆管理模块初始化 + // Clone Management Module Initialization CloneOptions cloneOptions; InitCloneOptions(&conf, &cloneOptions); uint32_t sliceSize; @@ -189,7 +189,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0) << "Failed to initialize clone manager."; - // 初始化注册模块 + // Initialize registration module RegisterOptions registerOptions; InitRegisterOptions(&conf, ®isterOptions); registerOptions.useChunkFilePoolAsWalPoolReserve = @@ -202,7 +202,7 @@ int ChunkServer::Run(int argc, char** argv) { Register registerMDS(registerOptions); ChunkServerMetadata metadata; ChunkServerMetadata localMetadata; - // 从本地获取meta + // Get Meta from Local std::string metaPath = UriParser::GetPathFromUri( registerOptions.chunkserverMetaUri); @@ -217,7 +217,7 @@ int 
ChunkServer::Run(int argc, char** argv) { &localMetadata, &metadata, epochMap) != 0) << "Failed to register to MDS."; } else { - // 如果本地获取不到,向mds注册 + // If it cannot be obtained locally, register with MDS LOG(INFO) << "meta file " << metaPath << " do not exist, register to mds"; LOG_IF(FATAL, registerMDS.RegisterToMDS( @@ -225,7 +225,7 @@ int ChunkServer::Run(int argc, char** argv) { << "Failed to register to MDS."; } - // trash模块初始化 + // Trash module initialization TrashOptions trashOptions; InitTrashOptions(&conf, &trashOptions); trashOptions.localFileSystem = fs; @@ -235,7 +235,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, trash_->Init(trashOptions) != 0) << "Failed to init Trash"; - // 初始化复制组管理模块 + // Initialize the replication group management module CopysetNodeOptions copysetNodeOptions; InitCopysetNodeOptions(&conf, &copysetNodeOptions); copysetNodeOptions.concurrentapply = &concurrentapply; @@ -256,16 +256,16 @@ int ChunkServer::Run(int argc, char** argv) { } } - // install snapshot的带宽限制 + // Bandwidth limit for install snapshot int snapshotThroughputBytes; LOG_IF(FATAL, !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes", &snapshotThroughputBytes)); /** - * checkCycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, - * checkCycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 - * 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 - * 不是20MB的带宽 + * checkCycles allows finer-grained bandwidth control. Taking snapshotThroughputBytes=100MB and + * checkCycles=10 as an example, it guarantees a bandwidth of 10MB per 1/10 second, and unused bandwidth does not + * accumulate: the first 1/10 second gets 10MB and then that budget expires, so the second 1/10 second can still + * only use 10MB of bandwidth, not 20MB */ int checkCycles; LOG_IF(FATAL, @@ -282,7 +282,7 @@ int ChunkServer::Run(int argc, char** argv) { return -1; } butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port); - // 注册curve snapshot storage + // Register curve snapshot storage RegisterCurveSnapshotStorageOrDie(); CurveSnapshotStorage::set_server_addr(endPoint); copysetNodeManager_ = &CopysetNodeManager::GetInstance(); @@ -296,7 +296,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0) << "Failed to init scan manager."; - // 心跳模块初始化 + // Heartbeat module initialization HeartbeatOptions heartbeatOptions; InitHeartbeatOptions(&conf, &heartbeatOptions); heartbeatOptions.copysetNodeManager = copysetNodeManager_; @@ -308,7 +308,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0) << "Failed to init Heartbeat manager."; - // 监控部分模块的metric指标 + // Monitor the metrics of some modules metric->MonitorTrash(trash_.get()); metric->MonitorChunkFilePool(chunkfilePool.get()); if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool) { @@ -316,8 +316,8 @@ int ChunkServer::Run(int argc, char** argv) { } metric->ExposeConfigMetric(&conf); - // ========================添加rpc服务===============================// - // TODO(lixiaocui): rpc中各接口添加上延迟metric + // ========================Add RPC Service===============================// + // TODO(lixiaocui): Add latency metrics to each rpc interface brpc::Server server; brpc::Server externalServer; // We need call braft::add_service to add endPoint to braft::NodeManager @@ -385,16 +385,16 @@ int ChunkServer::Run(int argc, char** argv) { brpc::SERVER_DOESNT_OWN_SERVICE); CHECK(0 == ret) << "Fail to add ScanCopysetService"; - // 启动rpc service + // 
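A hedged sketch of the windowed budget described in the comment above (not the actual braft snapshot-throttle implementation): with snapshotThroughputBytes=100MB and checkCycles=10, each 1/10-second window gets a fresh 10MB budget and whatever is left over simply expires rather than carrying into the next window.

    #include <algorithm>
    #include <cstdint>

    // Illustrative fixed-window budget: the per-second budget is split into checkCycles windows
    // and unused budget expires with the window instead of accumulating.
    class WindowedThrottleSketch {
     public:
        WindowedThrottleSketch(uint64_t bytesPerSecond, uint32_t checkCycles)
            : budgetPerWindow_(bytesPerSecond / checkCycles),
              available_(budgetPerWindow_) {}
        // Called at the start of every 1/checkCycles second.
        void OnNewWindow() { available_ = budgetPerWindow_; }
        // Returns how many bytes the caller may transfer right now.
        uint64_t Acquire(uint64_t want) {
            uint64_t granted = std::min(want, available_);
            available_ -= granted;
            return granted;
        }
     private:
        const uint64_t budgetPerWindow_;
        uint64_t available_;
    };

With the example numbers, Acquire() can hand out at most 10MB in any window even if the previous window was completely idle.
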
Start rpc service LOG(INFO) << "Internal server is going to serve on: " << copysetNodeOptions.ip << ":" << copysetNodeOptions.port; if (server.Start(endPoint, NULL) != 0) { LOG(ERROR) << "Fail to start Internal Server"; return -1; } - /* 启动external server - external server用于向client和工具等外部提供服务 - 区别于mds和chunkserver之间的通信*/ + /* Start external server + External server is used to provide services to external clients and tools + Different from communication between MDS and chunkserver*/ if (registerOptions.enableExternalServer) { ret = externalServer.AddService(©setService, brpc::SERVER_DOESNT_OWN_SERVICE); @@ -421,11 +421,11 @@ int ChunkServer::Run(int argc, char** argv) { } } - // =======================启动各模块==================================// + // =======================Start each module==================================// LOG(INFO) << "ChunkServer starts."; /** - * 将模块启动放到rpc 服务启动后面,主要是为了解决内存增长的问题 - * 控制并发恢复的copyset数量,copyset恢复需要依赖rpc服务先启动 + * Placing module startup after RPC service startup is mainly to address memory growth issues + * Control the number of copysets for concurrent recovery. Copyset recovery requires the RPC service to be started first */ LOG_IF(FATAL, trash_->Run() != 0) << "Failed to start trash."; @@ -440,7 +440,7 @@ int ChunkServer::Run(int argc, char** argv) { LOG_IF(FATAL, !chunkfilePool->StartCleaning()) << "Failed to start file pool clean worker."; - // =======================等待进程退出==================================// + // =======================Wait for the process to exit==================================// while (!brpc::IsAskedToQuit()) { bthread_usleep(1000000L); } @@ -751,7 +751,7 @@ void ChunkServer::InitMetricOptions( } void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 + // If there are settings on the command line, the command line overwrites the fields in the configuration file google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) { conf->SetStringValue("global.ip", FLAGS_chunkServerIp); @@ -864,7 +864,7 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); } - // 设置日志存放文件夹 + // Set log storage folder if (FLAGS_log_dir.empty()) { if (!conf->GetStringValue("chunkserver.common.logDir", &FLAGS_log_dir)) { // NOLINT LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf @@ -901,16 +901,16 @@ int ChunkServer::GetChunkServerMetaFromLocal( LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; return -1; } - // 从配置文件中获取chunkserver元数据的文件路径 + // Obtain the file path for chunkserver metadata from the configuration file proto = UriParser::GetProtocolFromUri(metaUri); if (proto != "local") { LOG(ERROR) << "Chunkserver meta protocal " << proto << " is not supported yet"; return -1; } - // 元数据文件已经存在 + // The metadata file already exists if (fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) { - // 获取文件内容 + // Get File Content if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) { LOG(ERROR) << "Fail to read persisted chunkserver meta data"; return -1; diff --git a/src/chunkserver/chunkserver.h b/src/chunkserver/chunkserver.h index b9e9005545..9dc9830bcf 100644 --- a/src/chunkserver/chunkserver.h +++ b/src/chunkserver/chunkserver.h @@ -43,17 +43,17 @@ namespace chunkserver { class ChunkServer { public: /** - * @brief 初始化Chunkserve各子模块 + * @brief Initialize Chunkserve sub modules * - * @param[in] argc 命令行参数总数 - * @param[in] argv 命令行参数列表 + * @param[in] argc 
Total number of command line arguments + * @param[in] argv command line argument list * - * @return 0表示成功,非0失败 + * @return 0 indicates success, non 0 indicates failure */ int Run(int argc, char** argv); /** - * @brief 停止chunkserver,结束各子模块 + * @brief: Stop chunkserver and end each sub module */ void Stop(); @@ -102,22 +102,22 @@ class ChunkServer { const std::string &metaUri, ChunkServerMetadata *metadata); private: - // copysetNodeManager_ 管理chunkserver上所有copysetNode + // copysetNodeManager_ Manage all copysetNodes on the chunkserver CopysetNodeManager* copysetNodeManager_; - // cloneManager_ 管理克隆任务 + // cloneManager_ Manage Clone Tasks CloneManager cloneManager_; // scan copyset manager ScanManager scanManager_; - // heartbeat_ 负责向mds定期发送心跳,并下发心跳中任务 + // heartbeat_ Responsible for regularly sending heartbeat to MDS and issuing tasks in the heartbeat Heartbeat heartbeat_; - // trash_ 定期回收垃圾站中的物理空间 + // trash_ Regularly recycle physical space in the garbage bin std::shared_ptr trash_; - // install snapshot流控 + // install snapshot flow control scoped_refptr snapshotThrottle_; }; diff --git a/src/chunkserver/chunkserver_helper.cpp b/src/chunkserver/chunkserver_helper.cpp index cf12df7f67..c4ec444f9b 100644 --- a/src/chunkserver/chunkserver_helper.cpp +++ b/src/chunkserver/chunkserver_helper.cpp @@ -63,7 +63,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return false; } - // 验证meta是否正确 + // Verify if the meta is correct uint32_t crc = MetadataCrc(*out); if (crc != out->checksum()) { LOG(ERROR) << "ChunkServer persisted metadata CRC dismatch." diff --git a/src/chunkserver/chunkserver_main.cpp b/src/chunkserver/chunkserver_main.cpp index 5bc4cb8736..4c13bf64d8 100644 --- a/src/chunkserver/chunkserver_main.cpp +++ b/src/chunkserver/chunkserver_main.cpp @@ -26,7 +26,7 @@ int main(int argc, char* argv[]) { butil::AtExitManager atExitManager; ::curve::chunkserver::ChunkServer chunkserver; LOG(INFO) << "ChunkServer starting."; - // 这里不能用fork创建守护进程,bvar会存在一些问题 + // You cannot use fork to create daemons here, as bvar may have some issues // https://github.com/apache/incubator-brpc/issues/697 // https://github.com/apache/incubator-brpc/issues/208 chunkserver.Run(argc, argv); diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index 339ecbbe66..4ac2b80e95 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -37,7 +37,7 @@ IOMetric::IOMetric() IOMetric::~IOMetric() {} int IOMetric::Init(const std::string &prefix) { - // 暴露所有的metric + // Expose all metrics if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; return -1; @@ -96,7 +96,7 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { int CSIOMetric::Init(const std::string &prefix) { - // 初始化io统计项metric + // Initialize IO statistics item metric std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; std::string recoverPrefix = prefix + "_recover"; @@ -224,8 +224,8 @@ ChunkServerMetric::ChunkServerMetric() ChunkServerMetric *ChunkServerMetric::self_ = nullptr; ChunkServerMetric *ChunkServerMetric::GetInstance() { - // chunkserver metric 在chunkserver启动时初始化创建 - // 因此创建的时候不会存在竞争,不需要锁保护 + // Chunkserver metric initializes creation when chunkserver starts + // Therefore, there will be no competition during creation and lock protection is not required if (self_ == nullptr) { self_ = new ChunkServerMetric; } @@ -245,14 +245,14 @@ int ChunkServerMetric::Init(const 
ChunkServerMetricOptions &option) { return 0; } - // 初始化io统计项metric + // Initialize IO statistics item metric int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { LOG(ERROR) << "Init chunkserver metric failed."; return -1; } - // 初始化资源统计 + // Initialize resource statistics std::string leaderCountPrefix = Prefix() + "_leader_count"; leaderCount_ = std::make_shared>(leaderCountPrefix); @@ -278,7 +278,7 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { } int ChunkServerMetric::Fini() { - // 释放资源,从而将暴露的metric从全局的map中移除 + // Release resources to remove exposed metrics from the global map ioMetrics_.Fini(); leaderCount_ = nullptr; chunkLeft_ = nullptr; @@ -335,8 +335,8 @@ ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, const CopysetID ©setId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 这里先保存copyset metric,等remove后再去释放 - // 防止在读写锁里面去操作metric,导致死锁 + // Save the copyset metric here first, and then release it after removing it + // Prevent operating metrics within read write locks, resulting in deadlocks auto metric = copysetMetricMap_.Get(groupId); copysetMetricMap_.Remove(groupId); return 0; diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index d4354d196f..015bab5452 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -56,55 +56,55 @@ using PassiveStatusPtr = std::shared_ptr>; template using AdderPtr = std::shared_ptr>; -// 使用LatencyRecorder的实现来统计读写请求的size情况 -// 可以统计分位值、最大值、中位数、平均值等情况 +// Using the implementation of LatencyRecorder to count the size of read and write requests +// Statistics can be conducted on quantile values, maximum values, median values, mean values, and other factors using IOSizeRecorder = bvar::LatencyRecorder; -// io 相关的统计项 +// IO related statistical items class IOMetric { public: IOMetric(); virtual ~IOMetric(); /** - * 初始化 io metric - * 主要用于曝光各metric指标 - * @param prefix: 用于bvar曝光时使用的前缀 - * @return 成功返回0,失败返回-1 + * Initialize io metric + * Mainly used for exposing various metric indicators + * @param prefix: The prefix used for bvar exposure + * @return returns 0 for success, -1 for failure */ int Init(const std::string &prefix); /** - * IO请求到来时统计requestNum + * Count requestNum when IO requests arrive */ void OnRequest(); /** - * IO 完成以后,记录该次IO的指标 - * 错误的io不会计入iops和bps统计 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * After IO is completed, record the indicators for this IO + * Incorrect IO will not be included in iops and bps statistics + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(size_t size, int64_t latUs, bool hasError); public: - // io请求的数量 + // Number of IO requests bvar::Adder reqNum_; - // 成功io的数量 + // Number of successful IO bvar::Adder ioNum_; - // 失败的io个数 + // Number of failed IO bvar::Adder errorNum_; - // 所有io的数据量 + // The data volume of all IO bvar::Adder ioBytes_; - // io的延时情况(分位值、最大值、中位数、平均值) + // Delay situation of IO (quantile, maximum, median, average) bvar::LatencyRecorder latencyRecorder_; - // io大小的情况(分位值、最大值、中位数、平均值) + // The size of IO (quantile, maximum, median, average) IOSizeRecorder sizeRecorder_; - // 最近1秒请求的IO数量 + // Number of IO requests in the last 1 second bvar::PerSecond> rps_; - // 最近1秒的iops + // iops in the last 1 second bvar::PerSecond> iops_; - // 
最近1秒的出错IO数量 + // Number of IO errors in the last 1 second bvar::PerSecond> eps_; - // 最近1秒的数据量 + // Data volume in the last 1 second bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -126,49 +126,49 @@ class CSIOMetric { ~CSIOMetric() {} /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type); /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type); /** - * 初始化各项op的metric统计项 - * @return 成功返回0,失败返回-1 + * Initialize metric statistics for each op + * @return returns 0 for success, -1 for failure */ int Init(const std::string &prefix); /** - * 释放各项op的metric资源 + * Release metric resources for various OPs */ void Fini(); protected: - // ReadChunk统计 + // ReadChunk statistics IOMetricPtr readMetric_; - // WriteChunk统计 + // WriteChunk statistics IOMetricPtr writeMetric_; - // RecoverChunk统计 + // RecoverChunk statistics IOMetricPtr recoverMetric_; - // PasteChunk信息 + // PasteChunk Information IOMetricPtr pasteMetric_; - // Download统计 + // Download statistics IOMetricPtr downloadMetric_; }; @@ -182,16 +182,16 @@ class CSCopysetMetric { ~CSCopysetMetric() {} /** - * 初始化copyset级别的metric统计项 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Initialize metric statistics at the copyset level + * @param logicPoolId: The ID of the logical pool to which the copyset belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ int Init(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** - * 监控DataStore指标,主要包括chunk的数量、快照的数量等 - * @param datastore: 该copyset下的datastore指针 + * Monitor DataStore indicators, mainly including the number of chunks, number of snapshots, etc + * @param datastore: The datastore pointer under this copyset */ void MonitorDataStore(CSDataStore *datastore); @@ -202,18 +202,18 @@ class CSCopysetMetric { void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the 
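The counters described in IOMetric above are plain bvar primitives. A self-contained, hedged sketch of how such a metric bundle is typically declared and exposed (field names mirror IOMetric, but this is only an illustration, not the actual implementation):

    #include <bvar/bvar.h>
    #include <bvar/latency_recorder.h>
    #include <cstddef>
    #include <cstdint>
    #include <string>

    struct IoMetricSketch {
        bvar::Adder<uint64_t> reqNum;
        bvar::Adder<uint64_t> ioNum;
        bvar::Adder<uint64_t> errorNum;
        bvar::Adder<uint64_t> ioBytes;
        bvar::LatencyRecorder latencyRecorder;
        bvar::PerSecond<bvar::Adder<uint64_t>> iops;
        bvar::PerSecond<bvar::Adder<uint64_t>> bps;

        explicit IoMetricSketch(const std::string& prefix)
            : iops(&ioNum), bps(&ioBytes) {
            reqNum.expose_as(prefix, "request_num");   // exported as <prefix>_request_num
            iops.expose_as(prefix, "iops");
            bps.expose_as(prefix, "bps");
        }

        void OnRequest() { reqNum << 1; }

        void OnResponse(size_t size, int64_t latUs, bool hasError) {
            if (hasError) { errorNum << 1; return; }   // failed IO is excluded from iops/bps
            ioNum << 1;
            ioBytes << size;
            latencyRecorder << latUs;
        }
    };
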
IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { @@ -221,9 +221,9 @@ class CSCopysetMetric { } /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); @@ -264,27 +264,27 @@ class CSCopysetMetric { } private: - // 逻辑池id + // Logical Pool ID LogicPoolID logicPoolId_; // copyset id CopysetID copysetId_; - // copyset上的 chunk 的数量 + // Number of chunks on copyset PassiveStatusPtr chunkCount_; // The total number of WAL segment in copyset PassiveStatusPtr walSegmentCount_; - // copyset上的 快照文件 的数量 + // Number of snapshot files on copyset PassiveStatusPtr snapshotCount_; - // copyset上的 clone chunk 的数量 + // The number of clone chunks on the copyset PassiveStatusPtr cloneChunkCount_; - // copyset上的IO类型的metric统计 + // Metric statistics of IO types on copyset CSIOMetric ioMetrics_; }; struct ChunkServerMetricOptions { bool collectMetric; - // chunkserver的ip + // Chunkserver IP std::string ip; - // chunkserver的端口号 + // The port number of chunkserver uint32_t port; ChunkServerMetricOptions() : collectMetric(false), ip("127.0.0.1"), port(8888) {} @@ -344,119 +344,119 @@ class CopysetMetricMap { } private: - // 保护复制组metric map的读写锁 + // Protect the read write lock of the replication group metric map RWLock rwLock_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key std::unordered_map map_; }; class ChunkServerMetric : public Uncopyable { public: - // 实现单例 + // Implementation singleton static ChunkServerMetric *GetInstance(); /** - * 初始化chunkserver统计项 - * @pa)ram option: 初始化配置项 - * @return 成功返回0,失败返回-1 + * Initialize chunkserver statistics + * @param option: Initialize configuration item + * @return returns 0 for success, -1 for failure */ int Init(const ChunkServerMetricOptions &option); /** - * 释放metric资源 - * @return 成功返回0,失败返回-1 + * Release metric resources + * @return returns 0 for success, -1 for failure */ int Fini(); /** - * 请求前记录metric - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 + * Record metric before request + * @param logicPoolId: The logical pool ID where this io operation is located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type */ void OnRequest(const LogicPoolID &logicPoolId, const CopysetID ©setId, CSIOMetricType type); /** - * 请求结束时记录该次IO指标 - * 错误的io不会计入iops和bps统计 - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record the IO metric at the end of the request + * Incorrect IO will not be included in iops and bps statistics + * @param logicPoolId: The logical pool ID where this io operation is located + * @param copysetId: The copysetID where this io operation is located + * @param type: Request type + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(const 
LogicPoolID &logicPoolId, const CopysetID ©setId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 创建指定copyset的metric - * 如果collectMetric为false,返回0,但实际并不会创建 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 + *Create a metric for the specified copyset + * If collectMetric is false, it returns 0, but it is not actually created + * @param logicPoolId: The ID of the logical pool to which the copyset belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure, or failure if the specified metric already exists */ int CreateCopysetMetric(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** - * 获取指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回指定的copyset metric,失败返回nullptr + * Obtain the metric of the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset belongs + * @param copysetId: The ID of the copyset + * @return successfully returns the specified copyset metric, while failure returns nullptr */ CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** - * 删除指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + *Delete the metric for the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ int RemoveCopysetMetric(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** - * 监视chunk分配池,主要监视池中chunk的数量 - * @param chunkFilePool: chunkfilePool的对象指针 + *Monitor the chunk allocation pool, mainly monitoring the number of chunks in the pool + * @param chunkFilePool: Object pointer to chunkfilePool */ void MonitorChunkFilePool(FilePool *chunkFilePool); /** - * 监视wal segment分配池,主要监视池中segment的数量 - * @param walFilePool: walfilePool的对象指针 + *Monitor the allocation pool of wall segments, mainly monitoring the number of segments in the pool + * @param walFilePool: Object pointer to walfilePool */ void MonitorWalFilePool(FilePool *walFilePool); /** - * 监视回收站 - * @param trash: trash的对象指针 + *Monitor Recycle Bin + * @param trash: Object pointer to trash */ void MonitorTrash(Trash *trash); /** - * 增加 leader count 计数 + * Increase the leader count count */ void IncreaseLeaderCount(); /** - * 减少 leader count 计数 + * Reduce leader count count */ void DecreaseLeaderCount(); /** - * 更新配置项数据 - * @param conf: 配置内容 + *Update configuration item data + * @param conf: Configuration content */ void ExposeConfigMetric(common::Configuration *conf); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); @@ -522,31 +522,31 @@ class ChunkServerMetric : public Uncopyable { } private: - // 初始化标志 + // Initialization flag bool hasInited_; - // 配置项 + // Configuration Item ChunkServerMetricOptions option_; - // leader 的数量 + // Number of leaders AdderPtr leaderCount_; - // chunkfilepool 中剩余的 chunk 的数量 + // The number of remaining chunks in the chunkfilepool PassiveStatusPtr chunkLeft_; - // walfilepool 中剩余的 wal segment 
的数量 + // The number of remaining wal segments in the walfilepool PassiveStatusPtr walSegmentLeft_; - // trash 中的 chunk 的数量 + // Number of chunks in trash PassiveStatusPtr chunkTrashed_; - // chunkserver上的 chunk 的数量 + // Number of chunks on chunkserver PassiveStatusPtr chunkCount_; // The total number of WAL segment in chunkserver PassiveStatusPtr walSegmentCount_; - // chunkserver上的 快照文件 的数量 + // Number of snapshot files on chunkserver PassiveStatusPtr snapshotCount_; - // chunkserver上的 clone chunk 的数量 + // Number of clone chunks on chunkserver PassiveStatusPtr cloneChunkCount_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key CopysetMetricMap copysetMetricMap_; - // chunkserver上的IO类型的metric统计 + // Metric statistics of IO types on chunkserver CSIOMetric ioMetrics_; - // 用于单例模式的自指指针 + // Self pointing pointer for singleton mode static ChunkServerMetric *self_; }; diff --git a/src/chunkserver/cli.h b/src/chunkserver/cli.h index 3c8ecc6997..0765b628a7 100644 --- a/src/chunkserver/cli.h +++ b/src/chunkserver/cli.h @@ -33,37 +33,37 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli is the encapsulation of configuration change related interfaces, which is convenient to use and avoids direct RPC operations */ -// 获取leader +// Get the leader butil::Status GetLeader(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, PeerId *leaderId); -// 增加一个peer +// Add a peer butil::Status AddPeer(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, const PeerId &peer_id, const braft::cli::CliOptions &options); -// 移除一个peer +// Remove a peer butil::Status RemovePeer(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, const PeerId &peer_id, const braft::cli::CliOptions &options); -// 转移leader +// Transfer leader butil::Status TransferLeader(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, const PeerId &peer, const braft::cli::CliOptions &options); -// 触发快照 +// Trigger snapshot butil::Status Snapshot(const LogicPoolID &logicPoolId, const CopysetID ©setId, const PeerId &peer, diff --git a/src/chunkserver/cli2.cpp b/src/chunkserver/cli2.cpp index 5328724316..91794f940d 100644 --- a/src/chunkserver/cli2.cpp +++ b/src/chunkserver/cli2.cpp @@ -274,14 +274,14 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -// reset peer不走一致性协议,直接将peers重置,因此存在一定的风险 -// 应用场景:大多数节点挂掉的极端情况。在这种情况下,该copyset将无法写入,直 -// 到半小时后mds将挂掉的副本上的copyset迁移,因此有一段时间不可用,为了应对这种场景,引入了 -// reset peer工具,直接将复制组成员reset成只包含存活的副本。 -// 注意事项: -// 1、reset peer之前,需要通过check-copyset工具确认复制组中的大多数副本确实挂掉 -// 2、reset peer的时候,要确保剩下的副本有最新的数据,不然存在丢数据的风险 -// 3、reset peer适用于其他两个副本不能恢复的情况,不然可能会扰乱集群 +// reset peer does not follow a consistency protocol and directly resets them, thus posing certain risks +// Application scenario: Extreme situation where most nodes fail. In this case, the copyset will not be able to be written directly +// After half an hour, MDS will migrate the copyset on the suspended replica, which will be unavailable for a period of time. To cope with this scenario, we have introduced +// The reset peer tool directly resets replication group members to only contain surviving replicas. +// Precautions: +// 1. Before resetting the peer, it is necessary to confirm through the check-copyset tool that most of the replicas in the replication group have indeed been suspended +// 2. 
When resetting the peer, ensure that the remaining replicas have the latest data, otherwise there is a risk of data loss +// 3. Reset peer is suitable for situations where the other two replicas cannot be restored, otherwise it may disrupt the cluster butil::Status ResetPeer(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration& newPeers, diff --git a/src/chunkserver/cli2.h b/src/chunkserver/cli2.h index ba60e057e7..d34ccb6c8e 100644 --- a/src/chunkserver/cli2.h +++ b/src/chunkserver/cli2.h @@ -33,57 +33,57 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli is the encapsulation of configuration change related interfaces, which is convenient to use and avoids direct RPC operations */ -// 获取leader +// Get the leader butil::Status GetLeader(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, Peer *leader); -// 增加一个peer +// Add a peer butil::Status AddPeer(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, const Peer &peer, const braft::cli::CliOptions &options); -// 移除一个peer +// Remove a peer butil::Status RemovePeer(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, const Peer &peer, const braft::cli::CliOptions &options); -// 变更配置 +// Change configuration butil::Status ChangePeers(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, const Configuration &newPeers, const braft::cli::CliOptions &options); -// 转移leader +// Transfer leader butil::Status TransferLeader(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf, const Peer &peer, const braft::cli::CliOptions &options); -// 重置复制组 +// Reset replication group butil::Status ResetPeer(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options); -// 触发快照 +// Trigger snapshot butil::Status Snapshot(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Peer& peer, const braft::cli::CliOptions& options); -// 给chunkserver上全部copyset副本触发快照 +// Trigger a snapshot for all copyset replicas on the chunkserver butil::Status SnapshotAll(const Peer& peer, const braft::cli::CliOptions& options); diff --git a/src/chunkserver/clone_copyer.h b/src/chunkserver/clone_copyer.h index 6ccb7d7dc1..a382019538 100644 --- a/src/chunkserver/clone_copyer.h +++ b/src/chunkserver/clone_copyer.h @@ -52,28 +52,28 @@ using std::string; class DownloadClosure; struct CopyerOptions { - // curvefs上的root用户信息 + // Root user information on curvefs UserInfo curveUser; - // curvefs 的配置文件路径 + // Profile path for curvefs std::string curveConf; - // s3adapter 的配置文件路径 + // Configuration file path for s3adapter std::string s3Conf; - // curve client的对象指针 + // Object pointer to curve client std::shared_ptr curveClient; - // s3 adapter的对象指针 + // Object pointer to s3 adapter std::shared_ptr s3Client; // curve file's time to live uint64_t curveFileTimeoutSec; }; struct AsyncDownloadContext { - // 源chunk的位置信息 + // Location information of the source chunk string location; - // 请求下载数据在对象中的相对偏移 + // Request to download the relative offset of data in the object off_t offset; - // 请求下载数据的的长度 + // The length of the requested download data size_t size; - // 存放下载数据的缓冲区 + // Buffer for storing downloaded data char* buf; }; @@ -98,22 +98,22 @@ class OriginCopyer { virtual ~OriginCopyer() = default; /** - * 初始化资源 - * @param options: 配置信息 - * @return: 成功返回0,失败返回-1 + * Initialize Resources + * 
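Given the ResetPeer declaration and the precautions above, a hedged usage sketch of resetting a copyset to its only surviving replica; it assumes the curve chunkserver cli2 and common headers are included, and the Peer accessor and option values are assumptions, not taken from the curve tools:

    #include <braft/cli.h>

    butil::Status ResetToSurvivorSketch(const LogicPoolID& lpid,
                                        const CopysetID& cpid,
                                        const Peer& survivingPeer) {
        Configuration newConf;                              // keep only the surviving replica
        newConf.add_peer(PeerId(survivingPeer.address()));  // assumes Peer exposes address()
        braft::cli::CliOptions opt;
        opt.timeout_ms = 3000;
        opt.max_retry = 3;
        return ResetPeer(lpid, cpid, newConf, survivingPeer, opt);
    }
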
@param options: Configuration information + * @return: Success returns 0, failure returns -1 */ virtual int Init(const CopyerOptions& options); /** - * 释放资源 - * @return: 成功返回0,失败返回-1 + * Release resources + * @return: Success returns 0, failure returns -1 */ virtual int Fini(); /** - * 异步地从源端拷贝数据 - * @param done:包含下载请求的上下文信息, - * 数据下载完成后执行该closure进行回调 + * Asynchronously copy data from the source + * @param done: Contains the context of the download request; + * the closure is executed as a callback after the data download completes */ virtual void DownloadAsync(DownloadClosure* done); @@ -131,7 +131,7 @@ class OriginCopyer { static void DeleteExpiredCurveCache(void* arg); private: - // curvefs上的root用户信息 + // Root user information on curvefs UserInfo curveUser_; // mutex for protect curveOpenTime_ std::mutex timeMtx_; std::list curveOpenTime_; // curve file's time to live uint64_t curveFileTimeoutSec_; - // 负责跟curve交互 + // Responsible for interacting with curve std::shared_ptr curveClient_; - // 负责跟s3交互 + // Responsible for interacting with s3 std::shared_ptr s3Client_; - // 保护fdMap_的互斥锁 + // Mutex protecting fdMap_ std::mutex mtx_; - // 文件名->文件fd 的映射 + // Mapping from file name to file fd std::unordered_map fdMap_; // Timer for clean expired curve file bthread::TimerThread timer_; diff --git a/src/chunkserver/clone_core.cpp b/src/chunkserver/clone_core.cpp index b3efe70f36..1af51a7a4f 100644 --- a/src/chunkserver/clone_core.cpp +++ b/src/chunkserver/clone_core.cpp @@ -51,7 +51,7 @@ DownloadClosure::DownloadClosure(std::shared_ptr readRequest, , cloneCore_(cloneCore) , readRequest_(readRequest) , done_(done) { - // 记录初始metric + // Record the initial metric if (readRequest_ != nullptr) { const ChunkRequest* request = readRequest_->GetChunkRequest(); ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); @@ -70,7 +70,7 @@ void DownloadClosure::Run() { downloadCtx_->buf, downloadCtx_->size, ReadBufferDeleter); CHECK(readRequest_ != nullptr) << "read request is nullptr."; - // 记录结束metric + // Record the final metric const ChunkRequest* request = readRequest_->GetChunkRequest(); ChunkServerMetric* csMetric = ChunkServerMetric::GetInstance(); uint64_t latencyUs = TimeUtility::GetTimeofDayUs() - beginTime_; @@ -81,7 +81,7 @@ void DownloadClosure::Run() { latencyUs, isFailed_); - // 从源端拷贝数据失败 + // Copying data from the source failed if (isFailed_) { LOG(ERROR) << "download origin data failed: " << " logic pool id: " << request->logicpoolid() @@ -94,17 +94,17 @@ void DownloadClosure::Run() { } if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { - // release doneGuard,将closure交给paste请求处理 + // Release doneGuard and hand the closure over to the paste request for processing cloneCore_->PasteCloneData(readRequest_, &copyData, downloadCtx_->offset, downloadCtx_->size, doneGuard.release()); } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // 出错或处理结束调用closure返回给用户 + // On error or when processing finishes, invoke the closure to return to the user cloneCore_->SetReadChunkResponse(readRequest_, &copyData); - // paste clone data是异步操作,很快就能处理完 + // Pasting the clone data is an asynchronous operation and finishes quickly cloneCore_->PasteCloneData(readRequest_, &copyData, downloadCtx_->offset, @@ -114,12 +114,12 @@ void DownloadClosure::Run() { } void CloneClosure::Run() { - // 释放资源 + // Release resources std::unique_ptr selfGuard(this); std::unique_ptr requestGuard(request_); std::unique_ptr responseGuard(response_); brpc::ClosureGuard doneGuard(done_); - // 
如果userResponse不为空,需要将response_中的相关内容赋值给userResponse + // If userResponse is not null, the relevant fields of response_ need to be copied into userResponse if (userResponse_ != nullptr) { if (response_->has_status()) { userResponse_->set_status(response_->status()); } @@ -159,16 +159,16 @@ int CloneCore::CloneReadByLocalInfo( uint32_t beginIndex = offset / blockSize; uint32_t endIndex = (offset + length - 1) / blockSize; - // 请求提交到CloneManager的时候,chunk一定是clone chunk - // 但是由于有其他请求操作相同的chunk,此时chunk有可能已经被遍写过了 - // 所以此处要先判断chunk是否是clone chunk,如果是再判断是否要拷贝数据 + // When the request is submitted to CloneManager, the chunk must be a clone chunk + // However, other requests operating on the same chunk may have fully written it by now + // So first check whether the chunk is still a clone chunk, and if so, then decide whether data needs to be copied bool needClone = chunkInfo.isClone && (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != Bitmap::NO_POS); if (needClone) { - // TODO(yyk) 这一块可以优化,但是优化方法判断条件可能比较复杂 - // 目前只根据是否存在未写过的page来决定是否要触发拷贝 - // chunk中请求读取范围内的数据存在page未被写过,则需要从源端拷贝数据 + // TODO(yyk) This part could be optimized, but the conditions for deciding how to optimize may be complex + // Currently the decision to trigger a copy is based only on whether there are unwritten pages + // If any page within the requested read range of the chunk has not been written yet, data needs to be copied from the source AsyncDownloadContext* downloadCtx = new (std::nothrow) AsyncDownloadContext; downloadCtx->location = chunkInfo.location; @@ -184,12 +184,12 @@ int CloneCore::CloneReadByLocalInfo( return 0; } - // 执行到这一步说明不需要拷贝数据,如果是recover请求可以直接返回成功 - // 如果是ReadChunk请求,则直接读chunk并返回 + // Reaching this point means no data needs to be copied.
If it is a recover request, it can directly return success + //If it is a ReadChunk request, read the chunk directly and return if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CHUNK_OP_TYPE::CHUNK_OP_READ == request->optype()) { - // 出错或处理结束调用closure返回给用户 + //Error or end of processing call closure returned to user return ReadChunk(readRequest); } return 0; @@ -225,17 +225,17 @@ int CloneCore::HandleReadRequest( brpc::ClosureGuard doneGuard(done); const ChunkRequest* request = readRequest->request_; - // 获取chunk信息 + //Obtain chunk information CSChunkInfo chunkInfo; ChunkID id = readRequest->ChunkId(); std::shared_ptr dataStore = readRequest->datastore_; CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); /* - * chunk存在:按照查看分析bitmap判断是否可以本地读 - * chunk不存在:如包含clone信息则从clonesource读,否则返回错误 - * 因为上层ReadChunkRequest::OnApply已经处理了NoExist - * 并且cloneinfo不存在的情况 + *Chunk exists: Check and analyze Bitmap to determine if it can be read locally + *Chunk does not exist: if it contains clone information, it will be read from clonesource, otherwise an error will be returned + * Because the upper level ReadChunkRequest::OnApply has already processed NoExist + * And the situation where cloneinfo does not exist */ switch (errorCode) { case CSErrorCode::Success: @@ -246,7 +246,7 @@ int CloneCore::HandleReadRequest( CloneReadByRequestInfo(readRequest, doneGuard.release()); return 0; } - // 否则fallthrough直接返回错误 + //Otherwise, fallthrough will directly return an error FALLTHROUGH_INTENDED; default: LOG(ERROR) << "get chunkinfo failed: " @@ -285,9 +285,9 @@ int CloneCore::ReadChunk(std::shared_ptr readRequest) { return -1; } - // 读成功后需要更新 apply index + //After successful reading, update the apply index readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); - // Return 完成数据读取后可以将结果返回给用户 + //After completing the data reading, Return can return the results to the user readRequest->cntl_->response_attachment().append( chunkData.get(), length); SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); @@ -303,13 +303,13 @@ int CloneCore::SetReadChunkResponse( std::shared_ptr dataStore = readRequest->datastore_; CSErrorCode errorCode = dataStore->GetChunkInfo(id, &chunkInfo); - // 如果chunk不存在,需要判断请求是否带源chunk的信息 - // 如果带了源chunk信息,说明用了lazy分配chunk机制,可以直接返回clone data - // 有一种情况,当请求的chunk是lazy allocate的,请求时chunk在本地是存在的, - // 并且请求读取的部分区域已经被写过,在从源端拷贝数据的时候,chunk又被删除了 - // 这种情况下会被当成正常请求返回,但是返回的数据不符合预期 - // 由于当前我们的curve file都是延迟删除的,文件真正删除时能够确保没有用户IO - // 如果后续添加了一些改动触发到这个问题,则需要进行修复 + //If the chunk does not exist, it is necessary to determine whether the request contains information about the source chunk + //If the source chunk information is provided, it indicates that the lazy allocation chunk mechanism is used, and clone data can be directly returned + //There is a situation where the requested chunk is lazily allocated and the requested chunk exists locally, + //And the requested read area has already been written, and when copying data from the source, the chunk has been deleted again + //In this case, it will be returned as a normal request, but the returned data does not meet expectations + //Due to the current delayed deletion of our curve files, it is ensured that there is no user IO when the files are truly deleted + //If some changes are added later that trigger this issue, it needs to be fixed // TODO(yyk) fix it bool expect = errorCode == CSErrorCode::Success || (errorCode == 
CSErrorCode::ChunkNotExistError && @@ -327,7 +327,7 @@ int CloneCore::SetReadChunkResponse( size_t length = request->size(); butil::IOBuf responseData; - // 如果chunk存在,则要从chunk中读取已经写过的区域合并后返回 + //If a chunk exists, read the regions that have already been written from the chunk and merge them back if (errorCode == CSErrorCode::Success) { char* chunkData = new (std::nothrow) char[length]; int ret = ReadThenMerge( @@ -343,7 +343,7 @@ int CloneCore::SetReadChunkResponse( } readRequest->cntl_->response_attachment().append(responseData); - // 读成功后需要更新 apply index + //After successful reading, update the apply index readRequest->node_->UpdateAppliedIndex(readRequest->applyIndex); SetResponse(readRequest, CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); return 0; @@ -361,7 +361,7 @@ int CloneCore::ReadThenMerge(std::shared_ptr readRequest, uint32_t blockSize = chunkInfo.blockSize; uint32_t beginIndex = offset / blockSize; uint32_t endIndex = (offset + length - 1) / blockSize; - // 获取chunk文件已经写过和未被写过的区域 + //Obtain the regions where the chunk file has been written and not written std::vector copiedRanges; std::vector uncopiedRanges; if (chunkInfo.isClone) { @@ -376,13 +376,13 @@ int CloneCore::ReadThenMerge(std::shared_ptr readRequest, copiedRanges.push_back(range); } - // 需要读取的起始位置在chunk中的偏移 + //The offset of the starting position to be read in the chunk off_t readOff; - // 读取到的数据要拷贝到缓冲区中的相对偏移 + //The relative offset of the read data to be copied into the buffer off_t relativeOff; - // 每次从chunk读取的数据长度 + //The length of data read from chunk each time size_t readSize; - // 1.Read 对于已写过的区域,从chunk文件中读取 + //1. Read for regions that have already been written, read from the chunk file CSErrorCode errorCode; for (auto& range : copiedRanges) { readOff = range.beginIndex * blockSize; @@ -405,7 +405,7 @@ int CloneCore::ReadThenMerge(std::shared_ptr readRequest, } } - // 2.Merge 对于未写过的区域,从源端下载的区域中拷贝出来进行merge + //2. 
Merge: for ranges that have not been written yet, copy the data out of the area downloaded from the source and merge it in for (auto& range : uncopiedRanges) { readOff = range.beginIndex * blockSize; readSize = (range.endIndex - range.beginIndex + 1) * blockSize; @@ -425,7 +425,7 @@ void CloneCore::PasteCloneData(std::shared_ptr readRequest, && !enablePaste_; if (dontPaste) return; - // 数据拷贝完成以后,需要将产生PaseChunkRequest将数据Paste到chunk文件 + // After the data copy completes, a PasteChunkRequest needs to be generated to paste the data into the chunk file ChunkRequest* pasteRequest = new ChunkRequest(); pasteRequest->set_optype(curve::chunkserver::CHUNK_OP_TYPE::CHUNK_OP_PASTE); pasteRequest->set_logicpoolid(request->logicpoolid()); @@ -440,7 +440,7 @@ void CloneCore::PasteCloneData(std::shared_ptr readRequest, closure->SetRequest(pasteRequest); closure->SetResponse(pasteResponse); closure->SetClosure(done); - // 如果是recover chunk的请求,需要将paste的结果通过rpc返回 + // If it is a recover chunk request, the result of the paste needs to be returned through rpc if (CHUNK_OP_TYPE::CHUNK_OP_RECOVER == request->optype()) { closure->SetUserResponse(readRequest->response_); } diff --git a/src/chunkserver/clone_core.h b/src/chunkserver/clone_core.h index c91183feb3..679290e1af 100644 --- a/src/chunkserver/clone_core.h +++ b/src/chunkserver/clone_core.h @@ -65,17 +65,17 @@ class DownloadClosure : public Closure { } protected: - // 下载是否出错出错 + // Whether the download failed bool isFailed_; - // 请求开始的时间 + // Request start time uint64_t beginTime_; - // 下载请求上下文信息 + // Download request context information AsyncDownloadContext* downloadCtx_; - // clone core对象 + // Clone core object std::shared_ptr cloneCore_; - // read chunk请求对象 + // Read chunk request object std::shared_ptr readRequest_; - // DownloadClosure生命周期结束后需要执行的回调 + // Callback to be executed when the DownloadClosure's lifecycle ends Closure* done_; }; @@ -101,13 +101,13 @@ class CloneClosure : public Closure { } private: - // paste chunk的请求结构体 + // Request structure for paste chunk ChunkRequest *request_; - // paste chunk的响应结构体 + // Response structure of paste chunk ChunkResponse *response_; - // 真正要返回给用户的响应结构体 + // The response structure actually returned to the user ChunkResponse *userResponse_; - // CloneClosure生命周期结束后需要执行的回调 + // Callback to be executed when the CloneClosure's lifecycle ends Closure *done_; }; @@ -122,64 +122,64 @@ class CloneCore : public std::enable_shared_from_this { virtual ~CloneCore() {} /** - * 处理读请求的逻辑 - * @param readRequest[in]:读请求信息 - * @param done[in]:任务完成后要执行的closure - * @return: 成功返回0,失败返回-1 + * Logic for processing read requests + * @param readRequest[in]: Read request information + * @param done[in]: The closure to be executed after the task is completed + * @return: Success returns 0, failure returns -1 */ int HandleReadRequest(std::shared_ptr readRequest, Closure* done); protected: /** - * 本地chunk文件存在情况下,按照本地记录的clone和bitmap信息进行数据读取 - * 会涉及读取远程文件结合本地文件进行merge返回结果 - * @param[in/out] readRequest: 用户请求&响应上下文 - * @param[in] chunkInfo: 对应本地的chunkinfo - * @return 成功返回0,失败返回负数 + * When the local chunk file exists, read data based on the locally recorded clone and bitmap information + * This may involve reading the remote file and merging it with the local file to produce the result + * @param[in/out] readRequest: User request & response context + * @param[in] chunkInfo: The corresponding local chunkinfo + * @return Success returns 0, failure returns a negative number */ int CloneReadByLocalInfo(std::shared_ptr
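A small hedged sketch of the block-index arithmetic used by the merge step above (names are illustrative; the real code reads through the datastore rather than from a flat buffer). For example, with blockSize = 4096, offset = 6144 and length = 8192, beginIndex = 1 and endIndex = 3, so blocks 1..3 are classified as copied or uncopied and merged into one reply buffer:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <sys/types.h>

    struct BlockRangeSketch { uint32_t beginIndex; uint32_t endIndex; };

    // Copies one already-written range from the local chunk data into the reply buffer.
    void MergeCopiedRangeSketch(const BlockRangeSketch& range, uint32_t blockSize,
                                off_t requestOffset, const char* chunkData, char* out) {
        off_t  readOff     = static_cast<off_t>(range.beginIndex) * blockSize;   // offset within the chunk
        size_t readSize    = (range.endIndex - range.beginIndex + 1) * static_cast<size_t>(blockSize);
        off_t  relativeOff = readOff - requestOffset;                            // offset within the reply buffer
        std::memcpy(out + relativeOff, chunkData + readOff, readSize);
    }
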
readRequest, const CSChunkInfo &chunkInfo, Closure* done); /** - * 本地chunk文件不存在情况下,按照用户请求上下文中带的clonesource信息进行数据读取 - * 不涉及merge本地结果 - * @param[in/out] readRequest: 用户请求&响应上下文 + * When the local chunk file does not exist, read the data according to the clonesource information in the user request context + * Not involving merge local results + * @param[in/out] readRequest: User Request&Response Context */ void CloneReadByRequestInfo(std::shared_ptr readRequest, Closure* done); /** - * 从本地chunk中读取请求的区域,然后设置response - * @param readRequest: 用户的ReadRequest - * @return: 成功返回0,失败返回-1 + * Read the requested area from the local chunk and set the response + * @param readRequest: User's ReadRequest + * @return: Success returns 0, failure returns -1 */ int ReadChunk(std::shared_ptr readRequest); /** - * 设置read chunk类型的response,包括返回的数据和其他返回参数 - * 从本地chunk中读取已被写过的区域,未写过的区域从克隆下来的数据中获取 - * 然后将数据在内存中merge - * @param readRequest: 用户的ReadRequest - * @param cloneData: 从源端拷贝下来的数据,数据起始偏移同请求中的偏移 - * @return: 成功返回0,失败返回-1 + * Set the response of the read chunk type, including the returned data and other return parameters + * Read the written area from the local chunk, and obtain the unwritten area from the cloned data + * Then merge the data into memory + * @param readRequest: User's ReadRequest + * @param cloneData: The data copied from the source has the same starting offset as the offset in the request + * @return: Success returns 0, failure returns -1 */ int SetReadChunkResponse(std::shared_ptr readRequest, const butil::IOBuf* cloneData); - // 从本地chunk中读取已经写过的区域合并到clone data中 + // Read the previously written regions from the local chunk and merge them into clone data int ReadThenMerge(std::shared_ptr readRequest, const CSChunkInfo& chunkInfo, const butil::IOBuf* cloneData, char* chunkData); /** - * 将从源端下载下来的数据paste到本地chunk文件中 - * @param readRequest: 用户的ReadRequest - * @param cloneData: 从源端下载的数据 - * @param offset: 下载的数据在chunk文件中的偏移 - * @param cloneDataSize: 下载的数据长度 - * @param done:任务完成后要执行的closure + * Paste the downloaded data from the source into the local chunk file + * @param readRequest: User's ReadRequest + * @param cloneData: Data downloaded from the source + * @param offset: The offset of the downloaded data in the chunk file + * @param cloneDataSize: Download data length + * @param done: The closure to be executed after the task is completed */ void PasteCloneData(std::shared_ptr readRequest, const butil::IOBuf* cloneData, @@ -191,11 +191,11 @@ class CloneCore : public std::enable_shared_from_this { CHUNK_OP_STATUS status); private: - // 每次拷贝的slice的大小 + // The size of each copied slice uint32_t sliceSize_; - // 判断read chunk类型的请求是否需要paste, true需要paste,false表示不需要 + // Determine whether a read chunk type request requires paste. 
True requires paste, while false indicates no need bool enablePaste_; - // 负责从源端下载数据 + // Responsible for downloading data from the source std::shared_ptr copyer_; }; diff --git a/src/chunkserver/clone_manager.cpp b/src/chunkserver/clone_manager.cpp index 6fc428bdba..71d912819f 100644 --- a/src/chunkserver/clone_manager.cpp +++ b/src/chunkserver/clone_manager.cpp @@ -40,7 +40,7 @@ int CloneManager::Init(const CloneOptions& options) { int CloneManager::Run() { if (isRunning_.load(std::memory_order_acquire)) return 0; - // 启动线程池 + // Start Thread Pool LOG(INFO) << "Begin to run clone manager."; tp_ = std::make_shared>(); int ret = tp_->Start(options_.threadNum, options_.queueCapacity); @@ -70,7 +70,7 @@ int CloneManager::Fini() { std::shared_ptr CloneManager::GenerateCloneTask( std::shared_ptr request, ::google::protobuf::Closure *done) { - // 如果core是空的,任务无法被处理,所以返回空 + // If the core is empty, the task cannot be processed, so it returns empty if (options_.core == nullptr) return nullptr; diff --git a/src/chunkserver/clone_manager.h b/src/chunkserver/clone_manager.h index 01f7088218..08ec7b3da7 100644 --- a/src/chunkserver/clone_manager.h +++ b/src/chunkserver/clone_manager.h @@ -44,13 +44,13 @@ using curve::common::TaskThreadPool; class ReadChunkRequest; struct CloneOptions { - // 核心逻辑处理类 + // Core logic processing class std::shared_ptr core; - // 最大线程数 + // Maximum number of threads uint32_t threadNum; - // 最大队列深度 + // Maximum queue depth uint32_t queueCapacity; - // 任务状态检查的周期,单位ms + // The cycle of task status check, in ms uint32_t checkPeriod; CloneOptions() : core(nullptr) , threadNum(10) @@ -64,49 +64,49 @@ class CloneManager { virtual ~CloneManager(); /** - * 初始化 + * Initialize * - * @param options[in]:初始化参数 - * @return 错误码 + * @param options[in]: initialization parameters + * @return error code */ virtual int Init(const CloneOptions& options); /** - * 启动所有线程 + * Start all threads * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ virtual int Run(); /** - * 停止所有线程 + * Stop all threads * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ virtual int Fini(); /** - * 生成克隆任务 - * @param request[in]:请求信息 - * @return:返回生成的克隆任务,如果生成失败,返回nullptr + * Generate Clone Task + * @param request[in]: Request information + * @return: Returns the generated clone task. 
If the generation fails, returns nullptr */ virtual std::shared_ptr GenerateCloneTask( std::shared_ptr request, ::google::protobuf::Closure* done); /** - * 发布克隆任务,产生克隆任务放到线程池中处理 - * @param task[in]:克隆任务 - * @return 成功返回true,失败返回false + * Publish clone tasks, generate clone tasks, and place them in the thread pool for processing + * @param task[in]: Clone task + * @return returns true for success, false for failure */ virtual bool IssueCloneTask(std::shared_ptr cloneTask); private: - // 克隆任务管理相关的选项,调Init的时候初始化 + // Clone task management related options, initialization when calling Init CloneOptions options_; - // 处理克隆任务的异步线程池 + // Asynchronous thread pool for processing cloning tasks std::shared_ptr> tp_; - // 当前线程池是否处于工作状态 + // Is the current thread pool in working state std::atomic isRunning_; }; diff --git a/src/chunkserver/clone_task.h b/src/chunkserver/clone_task.h index 48766bce9a..5ed26a0539 100644 --- a/src/chunkserver/clone_task.h +++ b/src/chunkserver/clone_task.h @@ -70,13 +70,13 @@ class CloneTask : public Uncopyable } protected: - // 克隆核心逻辑 + // Clone Core Logic std::shared_ptr core_; - // 此次任务相关信息 + // Information related to this task std::shared_ptr readRequest_; - // 任务结束后要执行的Closure + // Closure to be executed after the task is completed ::google::protobuf::Closure* done_; - // 任务是否结束 + // Is the task completed bool isComplete_; }; diff --git a/src/chunkserver/conf_epoch_file.cpp b/src/chunkserver/conf_epoch_file.cpp index 6a39c6ce3e..4c0e6bbb4e 100644 --- a/src/chunkserver/conf_epoch_file.cpp +++ b/src/chunkserver/conf_epoch_file.cpp @@ -30,7 +30,7 @@ namespace curve { namespace chunkserver { -// conf.epoch文件最大长度 +// Maximum length of conf.epoch file const uint32_t kConfEpochFileMaxSize = 4096; const uint64_t kConfEpochFileMagic = 0x6225929368674119; @@ -47,7 +47,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, char json[kConfEpochFileMaxSize] = {0}; int size = 0; - // 1. read数据 + // 1. Read data size = fs_->Read(fd, json, 0, kConfEpochFileMaxSize); if (size <= 0) { LOG(ERROR) << "LoadConfEpoch read failed: " << path @@ -58,7 +58,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, } fs_->Close(fd); - // 2.反序列化 + // 2. Deserialization ConfEpoch confEpoch; std::string jsonStr(json); std::string err; @@ -71,7 +71,7 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, return -1; } - // 3. 验证crc + // 3. Verify CRC uint32_t crc32c = ConfEpochCrc(confEpoch); if (crc32c != confEpoch.checksum()) { LOG(ERROR) << "conf epoch crc error: " << jsonStr; @@ -91,13 +91,13 @@ int ConfEpochFile::Load(const std::string &path, LogicPoolID *logicPoolID, int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, const CopysetID copysetID, const uint64_t epoch) { - // 1. 转换成conf message + // 1. Convert to conf message ConfEpoch confEpoch; confEpoch.set_logicpoolid(logicPoolID); confEpoch.set_copysetid(copysetID); confEpoch.set_epoch(epoch); - // 计算crc + // Calculate crc uint32_t crc32c = ConfEpochCrc(confEpoch); confEpoch.set_checksum(crc32c); @@ -113,7 +113,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 2. open文件 + // 2. Open file int fd = fs_->Open(path.c_str(), O_RDWR | O_CREAT); if (0 > fd) { LOG(ERROR) << "LoadConfEpoch failed open file " << path @@ -122,7 +122,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 3. write文件 + // 3. 
Write file if (static_cast(out.size()) != fs_->Write(fd, out.c_str(), 0, out.size())) { LOG(ERROR) << "SaveConfEpoch write failed, path: " << path @@ -132,7 +132,7 @@ int ConfEpochFile::Save(const std::string &path, const LogicPoolID logicPoolID, return -1; } - // 4. 落盘 + // 4. Flush to disk if (0 != fs_->Fsync(fd)) { LOG(ERROR) << "SaveConfEpoch sync failed, path: " << path << ", errno: " << errno diff --git a/src/chunkserver/conf_epoch_file.h b/src/chunkserver/conf_epoch_file.h index 91ee27ec6b..89dc05c9d8 100644 --- a/src/chunkserver/conf_epoch_file.h +++ b/src/chunkserver/conf_epoch_file.h @@ -38,8 +38,8 @@ using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; /** - * 配置版本序列化和反序列化的工具类 - * TODO(wudemiao): 后期替换采用json编码 + * Utility class for serializing and deserializing the configuration version + * TODO(wudemiao): Switch to JSON encoding later */ class ConfEpochFile { public: @@ -47,12 +47,12 @@ class ConfEpochFile { : fs_(fs) {} /** - * 加载快照文件中的配置版本 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本,出参,返回读取的epoch值 - * @return 0,成功; -1失败 + * Load the configuration version from the snapshot file + * @param path: File path + * @param logicPoolID: Logical pool ID + * @param copysetID: Copyset ID + * @param epoch: Configuration version, output parameter that returns the epoch value read + * @return 0 on success; -1 on failure */ int Load(const std::string &path, LogicPoolID *logicPoolID, @@ -60,17 +60,17 @@ class ConfEpochFile { uint64_t *epoch); /** - * 保存配置版本信息到快照文件中序列化的格式如下,处理head表示长度,使用二 - * 进制,其它都是文本格式,便于必要的时候能够直接用查看,sync保证数据落盘 - * | head | 配置版本信息 | - * | 8 bytes size_t | uint32_t | 变 长文本 | - * | length | crc32 | logic pool id | copyset id | epoch | - * 上面的持久化使用 ‘:’ 分隔 - * @param path:文件路径 - * @param logicPoolID:逻辑池id - * @param copysetID:复制组id - * @param epoch:配置版本 - * @return 0成功; -1失败 + * Save the configuration version information to the snapshot file. The serialized format is as follows: + * the head holds the length and is binary; everything else is text so it can be inspected directly when needed, and sync guarantees the data reaches the disk. + * | head | Configuration version information | + * | 8 bytes size_t | uint32_t | Variable-length text | + * | length | crc32 | logic pool id | copyset id | epoch | + * The persisted fields above are separated by ':' + * @param path: File path + * @param logicPoolID: Logical pool ID + * @param copysetID: Copyset ID + * @param epoch: Configuration version + * @return 0 on success; -1 on failure */ int Save(const std::string &path, const LogicPoolID logicPoolID, diff --git a/src/chunkserver/config_info.h b/src/chunkserver/config_info.h index 67c3f57524..eb41aef7d4 100644 --- a/src/chunkserver/config_info.h +++ b/src/chunkserver/config_info.h @@ -43,13 +43,13 @@ class CopysetNodeManager; class CloneManager; /** - * copyset node的配置选项 + * Configuration options for copyset node */ struct CopysetNodeOptions { - // follower to candidate 超时时间,单位ms,默认是1000ms + // follower to candidate timeout, in ms, defaults to 1000ms int electionTimeoutMs; - // 定期打快照的时间间隔,默认3600s,也就是1小时 + // The interval between periodic snapshots, 3600s (1 hour) by default int snapshotIntervalS; // If true, read requests will be invoked in current lease leader node.
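For reference, the ConfEpochFile::Save()/Load() flow annotated above comes down to four steps: build the epoch record, attach a crc32 checksum, write the serialized text, and fsync before returning; Load() is the mirror image and rejects the file if the recomputed checksum does not match the stored one. The snippet below is a minimal standalone sketch of that sequence using plain POSIX I/O; ToyEpochRecord, ToyChecksum, and SaveEpochSketch are illustrative stand-ins, not Curve's ConfEpoch protobuf message, LocalFileSystem wrapper, or crc32c helper.

#include <fcntl.h>
#include <unistd.h>
#include <cstdint>
#include <string>

// Illustrative record; the real code serializes the ConfEpoch protobuf message.
struct ToyEpochRecord {
    uint32_t logicPoolId;
    uint32_t copysetId;
    uint64_t epoch;
};

// Placeholder checksum (FNV-1a) standing in for crc32c in this sketch.
static uint32_t ToyChecksum(const std::string& data) {
    uint32_t h = 0x811C9DC5u;
    for (unsigned char c : data) { h = (h ^ c) * 16777619u; }
    return h;
}

// Mirrors the documented steps: serialize, checksum, open, write, fsync.
int SaveEpochSketch(const std::string& path, const ToyEpochRecord& rec) {
    std::string body = "{\"logicPoolId\":" + std::to_string(rec.logicPoolId) +
                       ",\"copysetId\":" + std::to_string(rec.copysetId) +
                       ",\"epoch\":" + std::to_string(rec.epoch);
    std::string out = body + ",\"checksum\":" +
                      std::to_string(ToyChecksum(body)) + "}";

    int fd = ::open(path.c_str(), O_RDWR | O_CREAT, 0644);
    if (fd < 0) return -1;
    bool ok = ::pwrite(fd, out.data(), out.size(), 0) ==
                  static_cast<ssize_t>(out.size()) &&
              ::fsync(fd) == 0;  // make sure the data reaches the disk
    ::close(fd);
    return ok ? 0 : -1;
}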
@@ -57,79 +57,79 @@ struct CopysetNodeOptions { // Default: true bool enbaleLeaseRead; - // 如果follower和leader日志相差超过catchupMargin, - // 就会执行install snapshot进行恢复,默认: 1000 + //If the difference between the follower and leader logs exceeds catchupMargin, + //Will execute install snapshot for recovery, default: 1000 int catchupMargin; - // 是否开启pthread执行用户代码,默认false + //Enable pthread to execute user code, default to false bool usercodeInPthread; - // 所有uri个格式: ${protocol}://${绝对或者相对路径} + //All uri formats: ${protocol}://${absolute or relative path} // eg: // posix: local // bluestore: bluestore - // raft log uri, 默认raft_log + //Raft log uri, default raft_log std::string logUri; - // raft meta uri, 默认raft_meta + //Raft meta uri, default raft_meta std::string raftMetaUri; - // raft snapshot uri,默认raft_snpashot + //Raft snapshot uri, default raft_snpashot std::string raftSnapshotUri; - // chunk data uri,默认data + //Chunk data uri, default data std::string chunkDataUri; - // chunk snapshot uri,默认snapshot + //Chunk snapshot uri, default snapshot std::string chunkSnapshotUri; - // copyset data recycling uri,默认recycler + //Copyset data recycling uri, default recycler std::string recyclerUri; std::string ip; uint32_t port; - // chunk文件的大小 + //Chunk file size uint32_t maxChunkSize; // WAL segment file size uint32_t maxWalSegmentSize; - // chunk文件的page大小 + //The page size of the chunk file uint32_t metaPageSize; // alignment for I/O request uint32_t blockSize; - // clone chunk的location长度限制 + //Location length limit for clone chunks uint32_t locationLimit; - // 并发模块 + //Concurrent module ConcurrentApplyModule *concurrentapply; - // Chunk file池子 + //Chunk file pool std::shared_ptr chunkFilePool; // WAL file pool std::shared_ptr walFilePool; - // 文件系统适配层 + //File System Adaptation Layer std::shared_ptr localFileSystem; - // 回收站, 心跳模块判断该chunkserver不在copyset配置组时, - // 通知copysetManager将copyset目录移动至回收站 - // 一段时间后实际回收物理空间 + //When the recycle bin and heartbeat module determine that the chunkserver is not in the copyset configuration group, + //Notify the copysetManager to move the copyset directory to the recycle bin + //Actual recovery of physical space after a period of time std::shared_ptr trash; - // snapshot流控 + //Snapshot flow control scoped_refptr *snapshotThrottle; - // 限制chunkserver启动时copyset并发恢复加载的数量,为0表示不限制 + //Limit the number of copyset concurrent recovery loads during chunkserver startup, with a value of 0 indicating no limit uint32_t loadConcurrency = 0; // chunkserver sync_thread_pool number of threads. uint32_t syncConcurrency = 20; // copyset trigger sync timeout uint32_t syncTriggerSeconds = 25; - // 检查copyset是否加载完成出现异常时的最大重试次数 - // 可能的异常:1.当前大多数副本还没起来;2.网络问题等导致无法获取leader - // 3.其他的原因导致无法获取到leader的committed index + //Check if the copyset has completed loading and the maximum number of retries when an exception occurs + //Possible exceptions: 1. Currently, most replicas have not yet been restored; 2. Network issues and other issues preventing the acquisition of leaders + //3. 
Due to other reasons, it is not possible to obtain the committed index of the leader uint32_t checkRetryTimes = 3; - // 当前peer的applied_index与leader上的committed_index差距小于该值 - // 则判定copyset已经加载完成 + // If the difference between the current peer's applied_index and the leader's committed_index is less than this value, + // the copyset is considered to have finished loading uint32_t finishLoadMargin = 2000; - // 循环判定copyset是否加载完成的内部睡眠时间 + // Sleep interval inside the loop that checks whether the copyset has finished loading uint32_t checkLoadMarginIntervalMs = 1000; // enable O_DSYNC when open chunkfile @@ -145,7 +145,7 @@ struct CopysetNodeOptions { }; /** - * ChunkServiceManager 的依赖项 + * Dependencies of ChunkServiceManager */ struct ChunkServiceOptions { CopysetNodeManager *copysetNodeManager; diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp index a00f7aaf9a..f68eb30d54 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp @@ -89,7 +89,7 @@ CopysetNode::CopysetNode(const LogicPoolID &logicPoolId, } CopysetNode::~CopysetNode() { - // 移除 copyset的metric + // Remove the copyset's metric ChunkServerMetric::GetInstance()->RemoveCopysetMetric(logicPoolId_, copysetId_); metric_ = nullptr; @@ -108,7 +108,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { std::string protocol = curve::common::UriParser::ParseUri( options.chunkDataUri, &copysetDirPath_); if (protocol.empty()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add necessary error codes and return LOG(ERROR) << "not support chunk data uri's protocol" << " error chunkDataDir is: " << options.chunkDataUri << ". Copyset: " << GroupIdString(); @@ -140,7 +140,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { dsOptions); CHECK(nullptr != dataStore_); if (false == dataStore_->Initialize()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add necessary error codes and return LOG(ERROR) << "data store init failed. 
" << "Copyset: " << GroupIdString(); return -1; @@ -166,13 +166,13 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { // initialize raft node options corresponding to the copy set node InitRaftNodeOptions(options); - /* 初始化 peer id */ + /* Initialize peer id */ butil::ip_t ip; butil::str2ip(options.ip.c_str(), &ip); butil::EndPoint addr(ip, options.port); /** - * idx默认是零,在chunkserver不允许一个进程有同一个copyset的多副本, - * 这一点注意和不让braft区别开来 + * The default idx is zero; chunkserver does not allow one process to hold multiple replicas of the same copyset. + * Note that this is a point where it differs from braft */ peerId_ = PeerId(addr, 0); raftNode_ = std::make_shared(groupId, peerId_); @@ -180,7 +180,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { /* - * 初始化copyset性能metrics + * Initialize copyset performance metrics */ int ret = ChunkServerMetric::GetInstance()->CreateCopysetMetric( logicPoolId_, copysetId_); @@ -192,7 +192,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric( logicPoolId_, copysetId_); if (metric_ != nullptr) { - // TODO(yyk) 后续考虑添加datastore层面的io metric + // TODO(yyk) will consider adding io metrics at the datastore level in the future metric_->MonitorDataStore(dataStore_.get()); } @@ -213,7 +213,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { } int CopysetNode::Run() { - // raft node的初始化实际上让起run起来 + // Initializing the raft node actually makes it start running if (0 != raftNode_->init(nodeOptions_)) { LOG(ERROR) << "Fail to init raft node. " << "Copyset: " << GroupIdString(); @@ -237,14 +237,14 @@ void CopysetNode::Fini() { WaitSnapshotDone(); if (nullptr != raftNode_) { - // 关闭所有关于此raft node的服务 + // Close all services related to this raft node raftNode_->shutdown(nullptr); - // 等待所有的正在处理的task结束 + // Wait for all in-flight tasks to finish raftNode_->join(); } if (nullptr != concurrentapply_) { - // 将未刷盘的数据落盘,如果不刷盘 - // 迁移copyset时,copyset移除后再去执行WriteChunk操作可能出错 + // Flush any data that has not yet been flushed to disk; otherwise, + // when migrating a copyset, a WriteChunk executed after the copyset has been removed may fail concurrentapply_->Flush(); } } @@ -284,19 +284,19 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { void CopysetNode::on_apply(::braft::Iterator &iter) { for (; iter.valid(); iter.next()) { - // 放在bthread中异步执行,避免阻塞当前状态机的执行 + // Execute asynchronously in a bthread to avoid blocking the current state machine braft::AsyncClosureGuard doneGuard(iter.done()); /** - * 获取向braft提交任务时候传递的ChunkClosure,里面包含了 - * Op的所有上下文 ChunkOpRequest + * Obtain the ChunkClosure passed in when the task was submitted to braft; it carries + * the full context of the Op (the ChunkOpRequest) */ braft::Closure *closure = iter.done(); if (nullptr != closure) { /** - * 1.closure不是null,那么说明当前节点正常,直接从内存中拿到Op - * context进行apply + * 1. If the closure is not null, the current node is healthy, so the Op + * context is taken directly from memory and applied */ ChunkClosure *chunkClosure = dynamic_cast(iter.done()); @@ -307,12 +307,12 @@ void CopysetNode::on_apply(::braft::Iterator &iter) { &ChunkOpRequest::OnApply, opRequest, iter.index(), doneGuard.release()); } else { - // 获取log entry + // Obtain the log entry butil::IOBuf log = iter.data(); /** - * 2.closure是null,有两种情况: - * 2.1. 节点重启,回放apply,这里会将Op log entry进行反序列化, - * 然后获取Op信息进行apply + * 2. If the closure is null, there are two situations: + * 2.1.
Restart the node and replay the application. Here, the Op log entry will be deserialized, + * Then obtain Op information for application * 2.2. follower apply */ ChunkRequest request; @@ -350,7 +350,7 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, brpc::ClosureGuard doneGuard(done); /** - * 1.flush I/O to disk,确保数据都落盘 + * 1. flush I/O to disk to ensure that all data is dropped */ concurrentapply_->Flush(); @@ -359,7 +359,7 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 2.保存配置版本: conf.epoch,注意conf.epoch是存放在data目录下 + * 2. Save the configuration version: conf.epoch, please note that conf.epoch is stored in the data directory */ std::string filePathTemp = writer->get_path() + "/" + kCurveConfEpochFilename; @@ -373,19 +373,19 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 3.保存chunk文件名的列表到快照元数据文件中 + * 3. Save the list of chunk file names to the snapshot metadata file */ std::vector files; if (0 == fs_->List(chunkDataApath_, &files)) { for (const auto& fileName : files) { - // raft保存快照时,meta信息中不用保存快照文件列表 - // raft下载快照的时候,在下载完chunk以后,会单独获取snapshot列表 + // When saving a snapshot in the raft, there is no need to save the list of snapshot files in the meta information + // When raft downloads a snapshot, after downloading the chunk, a separate snapshot list will be obtained bool isSnapshot = DatastoreFileHelper::IsSnapshotFile(fileName); if (isSnapshot) { continue; } std::string chunkApath; - // 通过绝对路径,算出相对于快照目录的路径 + // Calculate the path relative to the snapshot directory through absolute path chunkApath.append(chunkDataApath_); chunkApath.append("/").append(fileName); std::string filePath = curve::common::CalcRelativePath( @@ -401,16 +401,16 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 4. 保存conf.epoch文件到快照元数据文件中 + * 4. Save the conf.epoch file to the snapshot metadata file */ writer->add_file(kCurveConfEpochFilename); } int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { /** - * 1. 加载快照数据 + * 1. 
Loading snapshot data */ - // 打开的 snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 + // Open snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 std::string snapshotPath = reader->get_path(); // /mnt/sda/1-10001/raft_snapshot/snapshot_0043/data @@ -419,13 +419,13 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { snapshotChunkDataDir.append("/").append(chunkDataRpath_); LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir << ", Copyset: " << GroupIdString(); - // 如果数据目录不存在,那么说明 load snapshot 数据部分就不需要处理 + // If the data directory does not exist, then the load snapshot data section does not need to be processed if (fs_->DirExists(snapshotChunkDataDir)) { - // 加载快照数据前,要先清理copyset data目录下的文件 - // 否则可能导致快照加载以后存在一些残留的数据 - // 如果delete_file失败或者rename失败,当前node状态会置为ERROR - // 如果delete_file或者rename期间进程重启,copyset起来后会加载快照 - // 由于rename可以保证原子性,所以起来加载快照后,data目录一定能还原 + // Before loading snapshot data, clean the files in the copyset data directory first + // Otherwise, it may result in some residual data after the snapshot is loaded + // If delete_file or rename fails, the current node status will be set to ERROR + // If delete_file or during the rename the process restarts, and after copyset is set, the snapshot will be loaded + // Since rename ensures atomicity, after loading the snapshot, the data directory must be restored bool ret = nodeOptions_.snapshot_file_system_adaptor->get()-> delete_file(chunkDataApath_, true); if (!ret) { @@ -455,7 +455,7 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 2. 加载配置版本文件 + * 2. Load Configuration Version File */ std::string filePath = reader->get_path() + "/" + kCurveConfEpochFilename; if (fs_->FileExists(filePath)) { @@ -468,20 +468,20 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 3.重新init data store,场景举例: + * 3. Reinitializing the data store, with examples: * - * (1) 例如一个add peer,之后立马read这个时候data store会返回chunk - * not exist,因为这个新增的peer在刚开始起来的时候,没有任何数据,这 - * 个时候data store init了,那么新增的peer在leader恢复了数据之后, - * data store并不感知; + * (1) For instance, when adding a new peer and immediately reading data, the data store may return "chunk not exist." + * This is because the newly added peer initially has no data, + * and when the data store is initialized, it is not aware of + * the data that the new peer receives after the leader recovers its data. * - * (2) peer通过install snapshot恢复了所有的数据是通过rename操作的, - * 如果某个file之前被data store打开了,那么rename能成功,但是老的 - * 文件只有等data store close老的文件才能删除,所以需要重新init data - * store,并且close的文件的fd,然后重新open新的文件,不然data store - * 会一直是操作的老的文件,而一旦data store close相应的fd一次之后, - * 后面的write的数据就会丢,除此之外,如果 datastore init没有重新open - * 文件,也将导致read不到恢复过来的数据,而是read到老的数据。 + * (2) When a peer recovers all of its data through an install snapshot operation, it is performed through a rename operation. + * If a file was previously open in the data store, the rename operation can succeed, but the old file can only be deleted + * after the data store closes it. Therefore, it is necessary to reinitialize the data store, close the file's file descriptor (fd), + * and then reopen the new file. Otherwise, the data store will continue to operate on the old file. + * Once the data store closes, the corresponding fd, any subsequent write operations will be lost. + * Additionally, if the datastore is not reinitialized + * and the new file is not reopened, it may result in reading the old data rather than the recovered data. 
*/ if (!dataStore_->Initialize()) { LOG(ERROR) << "data store init failed in on snapshot load. " @@ -490,8 +490,8 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 4.如果snapshot中存 conf,那么加载初始化,保证不需要以来 - * on_configuration_committed。需要注意的是这里会忽略joint stage的日志。 + * 4. If conf is stored in the snapshot, load initialization to ensure that there is no need for + * on_configuration_committed. It should be noted that the log of the joint stage will be ignored here. */ braft::SnapshotMeta meta; reader->load_meta(&meta); @@ -845,14 +845,14 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { void CopysetNode::UpdateAppliedIndex(uint64_t index) { uint64_t curIndex = appliedIndex_.load(std::memory_order_acquire); - // 只更新比自己大的 index + // Only update indexes larger than oneself if (index > curIndex) { /** - * compare_exchange_strong解释: - * 首先比较curIndex是不是等于appliedIndex,如果是,那么说明没有人 - * 修改appliedindex,那么用index去修改appliedIndex,更新成功,完成; - * 如果不等于,说明有人更新了appliedindex,那么通过curIndex返回当前 - * 的appliedindex,并且返回false。整个过程都是原子的 + * Explanation of compare_exchange_strong: + * First, it compares whether curIndex is equal to appliedIndex. If it is equal, it means that no one has modified appliedindex. + * In this case, it tries to update appliedIndex with the value of index, and if the update is successful, it's done. + * If curIndex is not equal to appliedindex, it indicates that someone else has updated appliedIndex in the meantime. In this case, it returns the current + * value of appliedindex through curIndex and returns false. This entire process is atomic. */ while (!appliedIndex_.compare_exchange_strong(curIndex, index, @@ -888,15 +888,15 @@ int CopysetNode::GetConfChange(ConfigChangeType *type, Configuration *oldConf, Peer *alterPeer) { /** - * 避免new leader当选leader之后,提交noop entry之前,epoch和 - * 配置可能不一致的情况。考虑如下情形: + * To prevent inconsistencies between the epoch and configuration before + * a new leader is elected and a noop entry is committed, consider the following scenario: * - * 三个成员的复制组{ABC},当前epoch=5,A是leader,收到配置配置+D, - * 假设B收到了{ABC+D}的配置变更日志,然后leader A挂了,B当选为了 - * new leader,在B提交noop entry之前,B上查询到的epoch值最大可能为5, - * 而查询到的配置确实{ABCD}了,所以这里在new leader B在提交noop entry - * 之前,也就是实现隐公提交配置变更日志{ABC+D}之前,不允许向用户返回 - * 配置和配置变更信息,避免epoch和配置信息不一致 + * In a replication group with three members {ABC}, the current epoch is 5, and A is the leader. A receives a configuration change log that adds D, and assume that B also receives the configuration change + * log {ABC+D}. Then, leader A crashes, and B is elected as the new leader. + * Before B commits the noop entry, the maximum epoch value it can query on B is still 5, + * but the queried configuration is {ABCD}. Therefore, here, before the new leader B commits the noop entry, + * which is effectively committing the hidden configuration change log {ABC+D}, it does not allow returning the configuration and configuration change information to the user + * to avoid epoch and configuration information inconsistency. 
*/ if (leaderTerm_.load(std::memory_order_acquire) <= 0) { *type = ConfigChangeType::NONE; @@ -934,7 +934,7 @@ int CopysetNode::GetHash(std::string *hash) { return -1; } - // 计算所有chunk文件crc需要保证计算的顺序是一样的 + // Calculating all chunk files' crc requires ensuring that the order of calculations is the same std::sort(files.begin(), files.end()); for (std::string file : files) { diff --git a/src/chunkserver/copyset_node.h b/src/chunkserver/copyset_node.h index cf7a34aeec..48b911b11b 100755 --- a/src/chunkserver/copyset_node.h +++ b/src/chunkserver/copyset_node.h @@ -100,9 +100,9 @@ class ConfigurationChangeDone : public braft::Closure { explicit ConfigurationChangeDone( std::shared_ptr cfgChange) : curCfgChange(cfgChange) {} - // copyset node中当前的配置变更信息 + // Current configuration change information in the copyset node std::shared_ptr curCfgChange; - // 这次配置变更对应的配置变更信息 + // The configuration change information corresponding to this configuration change ConfigurationChange expectedCfgChange; }; @@ -125,7 +125,7 @@ class SyncChunkThread : public curve::common::Uncopyable { }; /** - * 一个Copyset Node就是一个复制组的副本 + * A Copyset Node is a replica of a replication group */ class CopysetNode : public braft::StateMachine, public std::enable_shared_from_this { @@ -140,31 +140,31 @@ class CopysetNode : public braft::StateMachine, virtual ~CopysetNode(); /** - * 初始化copyset node配置 + * Initialize copyset node configuration * @param options - * @return 0,成功,-1失败 + * @return 0, successful, -1 failed */ virtual int Init(const CopysetNodeOptions &options); /** - * Raft Node init,使得Raft Node运行起来 + * Raft Node init to make Raft Node run * @return */ virtual int Run(); /** - * 关闭copyset node + * Close copyset node */ virtual void Fini(); /** - * 返回复制组的逻辑池ID + * Returns the logical pool ID of the replication group * @return */ LogicPoolID GetLogicPoolId() const; /** - * 返回复制组的复制组ID + * Returns the replication group ID of the replication group * @return */ CopysetID GetCopysetId() const; @@ -180,13 +180,13 @@ class CopysetNode : public braft::StateMachine, virtual std::vector& GetFailedScanMap(); /** - * 返回复制组数据目录 + * Return to the replication group data directory * @return */ std::string GetCopysetDir() const; /** - * 返回当前副本是否在leader任期 + * Returns whether the current replica is in the leader's tenure * @return */ virtual bool IsLeaderTerm() const; @@ -204,83 +204,83 @@ class CopysetNode : public braft::StateMachine, virtual bool IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const; // NOLINT /** - * 返回当前的任期 - * @return 当前的任期 + * Return to current tenure + * @return Current tenure */ virtual uint64_t LeaderTerm() const; /** - * 返回leader id + * Return leader id * @return */ virtual PeerId GetLeaderId() const; /** - * @brief 切换复制组的Leader - * @param[in] peerId 目标Leader的成员ID - * @return 心跳任务的引用 + * @brief Switch the leader of the replication group + * @param[in] peerId The member ID of the target leader + * @return Reference to Heartbeat Task */ butil::Status TransferLeader(const Peer& peer); /** - * @brief 复制组添加新成员 - * @param[in] peerId 新成员的ID - * @return 心跳任务的引用 + * @brief Add new members to the replication group + * @param[in] peerId The ID of the new member + * @return Reference to Heartbeat Task */ butil::Status AddPeer(const Peer& peer); /** - * @brief 复制组删除成员 - * @param[in] peerId 将要删除成员的ID - * @return 心跳任务的引用 + * @brief Copy Group Delete Members + * @param[in] peerId The ID of the member to be deleted + * @return Reference to Heartbeat Task */ butil::Status RemovePeer(const Peer& peer); /** - * @brief 
变更复制组成员 - * @param[in] newPeers 新的复制组成员 - * @return 心跳任务的引用 + * @brief Change replication group members + * @param[in] newPeers New replication group member + * @return Reference to Heartbeat Task */ butil::Status ChangePeer(const std::vector& newPeers); /** - * 返回copyset的配置版本 + * Returns the configuration version of the copyset * @return */ virtual uint64_t GetConfEpoch() const; /** - * 更新applied index,只有比它大的才更新 + * Update the applied index, only those larger than it will be updated * @param index */ virtual void UpdateAppliedIndex(uint64_t index); /** - * 返回当前最新的applied index + * Returns the current latest applied index * @return */ virtual uint64_t GetAppliedIndex() const; /** - * @brief: 查询配置变更的状态 - * @param type[out]: 配置变更类型 - * @param oldConf[out]: 老的配置 - * @param alterPeer[out]: 变更的peer - * @return 0查询成功,-1查询异常失败 + * @brief: Query the status of configuration changes + * @param type[out]: Configuration change type + * @param oldConf[out]: Old configuration + * @param alterPeer[out]: Changed Peer + * @return 0 query successful, -1 query exception failed */ virtual int GetConfChange(ConfigChangeType *type, Configuration *oldConf, Peer *alterPeer); /** - * @brief: 获取copyset node的状态值,用于比较多个副本的数据一致性 - * @param hash[out]: copyset node状态值 - * @return 0成功,-1失败 + * @brief: Obtain the status value of the copyset node for comparing data consistency across multiple replicas + * @param hash[out]: copyset node status value + * @return 0 succeeded, -1 failed */ virtual int GetHash(std::string *hash); /** - * @brief: 获取copyset node的status,实际调用的raft node的get_status接口 + * @brief: Get the status of the copyset node, actually calling the get_status interface of the Raft node * @param status[out]: copyset node status */ virtual void GetStatus(NodeStatus *status); @@ -292,14 +292,14 @@ class CopysetNode : public braft::StateMachine, virtual void GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status); /** - * 获取此copyset的leader上的status + * Obtain the status on the leader of this copyset * @param leaderStaus[out]: leader copyset node status - * @return 获取成功返回true,获取失败返回false + * @return returns true for successful acquisition, false for failed acquisition */ virtual bool GetLeaderStatus(NodeStatus *leaderStaus); /** - * 返回data store指针 + * Return data store pointer * @return */ virtual std::shared_ptr GetDataStore() const; @@ -311,19 +311,19 @@ class CopysetNode : public braft::StateMachine, virtual CurveSegmentLogStorage* GetLogStorage() const; /** - * 返回ConcurrentApplyModule + * Returning ConcurrentApplyModule */ virtual ConcurrentApplyModule* GetConcurrentApplyModule() const; /** - * 向copyset node propose一个op request + * Propose an op request to the copyset node * @param task */ virtual void Propose(const braft::Task &task); /** - * 获取复制组成员 - * @param peers:返回的成员列表(输出参数) + * Get replication group members + * @param peers: List of returned members (output parameters) * @return */ virtual void ListPeers(std::vector* peers); @@ -336,84 +336,83 @@ class CopysetNode : public braft::StateMachine, void InitRaftNodeOptions(const CopysetNodeOptions &options); /** - * 下面的接口都是继承StateMachine实现的接口 + * The following interfaces are all interfaces that inherit the implementation of StateMachine */ public: /** - * op log apply的时候回调函数 - * @param iter:可以batch的访问已经commit的log entries + * Callback function when applying op log + * @param iter: Allows batch access to already committed log entries. 
*/ void on_apply(::braft::Iterator &iter) override; /** - * 复制关闭的时候调用此回调 + * Call this callback when replication is closed */ void on_shutdown() override; /** - * raft snapshot相关的接口,仅仅保存raft snapshot meta - * 和snapshot文件的list,这里并没有拷贝实际的数据,因为 - * 在块存储场景所有操作是幂等,所以,并不真实的拷贝数据 + * Interfaces related to raft snapshot, which only store raft snapshot meta + * and a list of snapshot files. Actual data is not copied here because + * in the context of block storage, all operations are idempotent, so there is no need to actually copy the data. */ void on_snapshot_save(::braft::SnapshotWriter *writer, ::braft::Closure *done) override; /** - * load日志有两种情况: - * 1. Follower节点Install snapshot追赶leader,这个时候 - * snapshot目录下面有chunk数据和snapshot数据 - * 2. 节点重启,会执行snapshot load,然后回放日志,这个时 - * 候snapshot目录下面没有数据,什么都不用做 - * TODO(wudemiao): install snapshot的时候会存在空间 - * double的可能性,考虑如下场景,follower落后,然后通过从 - * leader install snapshot恢复数据,其首先会从leader将 - * 所有数据下载过来,然后在调用snapshot load加载快照,这个 - * 期间空间占用了就double了;后期需要通过控制单盘参与install - * snapshot的数量 + * There are two scenarios for loading logs: + * 1. Follower nodes catch up with the leader by installing a snapshot. In this case, + * there are chunk data and snapshot data under the snapshot directory. + * 2. When a node restarts, it performs a snapshot load and then replays the logs. In + * this case, there is no data under the snapshot directory, so nothing needs to be done. + * TODO(wudemiao): When installing a snapshot, there is a possibility of doubling + * the space usage. Consider the following scenario: a follower lags behind and then + * recovers data by installing a snapshot from the leader. It will first download all + * the data from the leader and then call snapshot load to load the snapshot. During + * this period, the space usage doubles. Later, we need to control the number of disks + * participating in the installation of snapshots. */ int on_snapshot_load(::braft::SnapshotReader *reader) override; /** - * new leader在apply noop之后会调用此接口,表示此 leader可 - * 以提供read/write服务了。 - * @param term:当前leader任期 + * The new leader will call this interface after applying noop, indicating that this leader can provide read/write services. 
+ * @param term: Current leader term */ void on_leader_start(int64_t term) override; /** - * leader step down的时候调用 - * @param status:复制组的状态 + * Called when the leader step is down + * @param status: The status of the replication group */ void on_leader_stop(const butil::Status &status) override; /** - * 复制组发生错误的时候调用 - * @param e:具体的 error + * Called when an error occurs in the replication group + * @param e: Specific error */ void on_error(const ::braft::Error &e) override; /** - * 配置变更日志entry apply的时候会调用此函数,目前会利用此接口 - * 更新配置epoch值 - * @param conf:当前复制组最新的配置 + * This function will be called when configuring the change log entry application, and currently this interface will be utilized + * Update configuration epoch value + * @param conf: The latest configuration of the current replication group * @param index log index */ void on_configuration_committed(const Configuration& conf, int64_t index) override; //NOLINT /** - * 当follower停止following主的时候调用 - * @param ctx:可以获取stop following的原因 + * Called when the follower stops following the main + * @param ctx: Can obtain the reason for stop following */ void on_stop_following(const ::braft::LeaderChangeContext &ctx) override; /** - * Follower或者Candidate发现新的leader后调用 - * @param ctx:leader变更上下,可以获取new leader和start following的原因 + * Called after the Follower or Candidate finds a new leader + * @param ctx: Change the leader up and down to obtain the reasons for the new leader and start following */ void on_start_following(const ::braft::LeaderChangeContext &ctx) override; /** - * 用于测试注入mock依赖 + * Used for testing injection mock dependencies */ public: void SetCSDateStore(std::shared_ptr datastore); @@ -435,16 +434,16 @@ class CopysetNode : public braft::StateMachine, // shared to sync pool static std::shared_ptr> copysetSyncPool_; /** - * 从文件中解析copyset配置版本信息 - * @param filePath:文件路径 - * @return 0: successs, -1 failed + * Parsing copyset configuration version information from a file + * @param filePath: File path + * @return 0: success, -1 fail */ int LoadConfEpoch(const std::string &filePath); /** - * 保存copyset配置版本信息到文件中 - * @param filePath:文件路径 - * @return 0 成功,-1 failed + * Save the copyset configuration version information to a file + * @param filePath: File path + * @return 0 success, -1 fail */ int SaveConfEpoch(const std::string &filePath); @@ -479,49 +478,49 @@ class CopysetNode : public braft::StateMachine, } private: - // 逻辑池 id + // Logical Pool ID LogicPoolID logicPoolId_; - // 复制组 id + // Copy Group ID CopysetID copysetId_; - // 复制组的配置 + // Configuration of replication groups Configuration conf_; - // 复制组的配置操作锁 + // Configuration operation lock for replication group mutable std::mutex confLock_; - // 复制组的配置版本 + // Copy the configuration version of the group std::atomic epoch_; - // 复制组副本的peer id + // Peer ID of the replication group replica PeerId peerId_; - // braft Node的配置参数 + // Configuration parameters for the braft Node NodeOptions nodeOptions_; - // CopysetNode对应的braft Node + // The braft Node corresponding to CopysetNode std::shared_ptr raftNode_; - // chunk file的绝对目录 + // Absolute directory for chunk files std::string chunkDataApath_; - // chunk file的相对目录 + // Relative directory for chunk files std::string chunkDataRpath_; - // copyset绝对路径 + // copyset absolute path std::string copysetDirPath_; - // 文件系统适配器 + // File system adapter std::shared_ptr fs_; - // Chunk持久化操作接口 + // Chunk Persistence Operation Interface std::shared_ptr dataStore_; // The log storage for braft CurveSegmentLogStorage* logStorage_; - // 并发模块 + // Concurrent 
module ConcurrentApplyModule *concurrentapply_ = nullptr; - // 配置版本持久化工具接口 + // Configure version persistence tool interface std::unique_ptr epochFile_; - // 复制组的apply index + // Apply index of replication group std::atomic appliedIndex_; - // 复制组当前任期,如果<=0表明不是leader + // Copy the current tenure of the group. If<=0, it indicates that it is not a leader std::atomic leaderTerm_; - // 复制组数据回收站目录 + // Copy Group Data Recycle Bin Directory std::string recyclerUri_; - // 复制组的metric信息 + // Copy the metric information of the group CopysetMetricPtr metric_; - // 正在进行中的配置变更 + // Configuration changes in progress std::shared_ptr configChange_; - // transfer leader的目标,状态为TRANSFERRING时有效 + // The target of the transfer leader is valid when the status is TRANSFERRING Peer transferee_; int64_t lastSnapshotIndex_; // scan status diff --git a/src/chunkserver/copyset_node_manager.cpp b/src/chunkserver/copyset_node_manager.cpp index 873f5334e9..ca2e919109 100755 --- a/src/chunkserver/copyset_node_manager.cpp +++ b/src/chunkserver/copyset_node_manager.cpp @@ -71,7 +71,7 @@ int CopysetNodeManager::Run() { CopysetNode::copysetSyncPool_->Start(copysetNodeOptions_.syncConcurrency); assert(copysetNodeOptions_.syncConcurrency > 0); int ret = 0; - // 启动线程池 + // Start Thread Pool if (copysetLoader_ != nullptr) { ret = copysetLoader_->Start( copysetNodeOptions_.loadConcurrency); @@ -82,7 +82,7 @@ int CopysetNodeManager::Run() { } } - // 启动加载已有的copyset + // Start loading existing copyset ret = ReloadCopysets(); if (ret == 0) { loadFinished_.exchange(true, std::memory_order_acq_rel); @@ -156,13 +156,13 @@ int CopysetNodeManager::ReloadCopysets() { } } - // 如果加载成功,则等待所有copyset加载完成,关闭线程池 + //If loading is successful, wait for all copysets to load and close the thread pool if (copysetLoader_ != nullptr) { while (copysetLoader_->QueueSize() != 0) { ::sleep(1); } - // queue size为0,但是线程池中的线程仍然可能还在执行 - // stop内部会去join thread,以此保证所有任务执行完以后再退出 + // Even when the queue size is 0, the threads in the thread pool may still be executing. + // The 'stop' function internally performs thread joining to ensure that all tasks are completed before exiting. copysetLoader_->Stop(); copysetLoader_ = nullptr; } @@ -183,8 +183,8 @@ void CopysetNodeManager::LoadCopyset(const LogicPoolID &logicPoolId, << (needCheckLoadFinished ? "Yes." : "No."); uint64_t beginTime = TimeUtility::GetTimeofDayMs(); - // chunkserver启动加载copyset阶段,会拒绝外部的创建copyset请求 - // 因此不会有其他线程加载或者创建相同copyset,此时不需要加锁 + // chunkserver starts the loading copyset phase and will reject external requests to create copysets + // Therefore, no other threads will load or create the same copyset, and locking is not necessary at this time Configuration conf; std::shared_ptr copysetNode = CreateCopysetNodeUnlocked(logicPoolId, copysetId, conf); @@ -224,9 +224,9 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( } NodeStatus leaderStaus; bool getSuccess = node->GetLeaderStatus(&leaderStaus); - // 获取leader状态失败一般是由于还没选出leader或者leader心跳还未发送到当前节点 - // 正常通过几次重试可以获取到leader信息,如果重试多次都未获取到 - // 则认为copyset当前可能无法选出leader,直接退出 + // Failure to obtain leader status is usually because a leader has not been elected yet, or the leader's heartbeat has not been received by the current node. + // Typically, leader information can be obtained through several retries. + // If multiple retries fail to obtain the information, it is assumed that the copyset may not be able to elect a leader at the moment, and the operation exits directly. 
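To make the copyset load check described here more concrete: one round of the check classifies a replica as repaired-by-snapshot (it lags behind the leader's first retained log entry, so the loader need not wait on it), as caught up (its applied index is within finishLoadMargin of the leader's committed index), or as still loading. The sketch below is a simplified standalone version of that decision; ToyNodeStatus, CheckOnce, and the example margin are illustrative assumptions rather than the project's braft::NodeStatus or CheckCopysetUntilLoadFinished. In the actual code this decision sits inside a retry loop with election-timeout-based sleeps, as the surrounding diff shows.

#include <cstdint>

// Illustrative stand-ins for the node status fields this check consults.
struct ToyNodeStatus {
    int64_t first_index = 0;
    int64_t last_index = 0;
    int64_t committed_index = 0;
    int64_t known_applied_index = 0;
};

enum class LoadCheckResult {
    kCaughtUp,             // applied index is within the margin, loading is done
    kWillInstallSnapshot,  // lags behind the leader's first log, no need to wait
    kKeepWaiting,          // keep polling the leader status
};

// One round of the decision; the caller loops, retries, and sleeps between rounds.
LoadCheckResult CheckOnce(const ToyNodeStatus& leader,
                          const ToyNodeStatus& self,
                          int64_t finishLoadMargin /* e.g. 2000 */) {
    if (leader.first_index > self.last_index) {
        return LoadCheckResult::kWillInstallSnapshot;
    }
    int64_t margin = leader.committed_index - self.known_applied_index;
    return margin <= finishLoadMargin ? LoadCheckResult::kCaughtUp
                                      : LoadCheckResult::kKeepWaiting;
}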
if (!getSuccess) { ++retryTimes; ::usleep(1000 * copysetNodeOptions_.electionTimeoutMs); @@ -235,8 +235,8 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( NodeStatus status; node->GetStatus(&status); - // 当前副本的最后一个日志落后于leader上保存的第一个日志 - // 这种情况下此副本会通过安装快照恢复,可以忽略避免阻塞检查线程 + // When the last log of the current replica lags behind the first log saved on the leader, + // in this situation, the replica will recover by installing a snapshot, and it can be safely ignored to avoid blocking the checking thread. bool mayInstallSnapshot = leaderStaus.first_index > status.last_index; if (mayInstallSnapshot) { LOG(WARNING) << "Copyset " @@ -250,7 +250,7 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( return false; } - // 判断当前副本已经apply的日志是否接近已经committed的日志 + // Determine whether the logs that have been applied to the current replica are close to the logs that have been committed int64_t margin = leaderStaus.committed_index - status.known_applied_index; bool catchupLeader = margin @@ -276,7 +276,7 @@ bool CopysetNodeManager::CheckCopysetUntilLoadFinished( std::shared_ptr CopysetNodeManager::GetCopysetNode( const LogicPoolID &logicPoolId, const CopysetID ©setId) const { - /* 加读锁 */ + /*Read lock*/ ReadLockGuard readLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); auto it = copysetNodeMap_.find(groupId); @@ -288,7 +288,7 @@ std::shared_ptr CopysetNodeManager::GetCopysetNode( void CopysetNodeManager::GetAllCopysetNodes( std::vector *nodes) const { - /* 加读锁 */ + /*Read lock*/ ReadLockGuard readLockGuard(rwLock_); for (auto it = copysetNodeMap_.begin(); it != copysetNodeMap_.end(); ++it) { nodes->push_back(it->second); @@ -299,16 +299,16 @@ bool CopysetNodeManager::CreateCopysetNode(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 如果本地copyset还未全部加载完成,不允许外部创建copyset + // If the local copyset has not been fully loaded yet, external copyset creation is not allowed if (!loadFinished_.load(std::memory_order_acquire)) { LOG(WARNING) << "Create copyset failed: load unfinished " << ToGroupIdString(logicPoolId, copysetId); return false; } - // copysetnode析构的时候会去调shutdown,可能导致协程切出 - // 所以创建copysetnode失败的时候,不能占着写锁,等写锁释放后再析构 + // When copysetnode is deconstructed, shutdown may be called, which may lead to coprocessor disconnection + // So when creating a copysetnode fails, it cannot occupy the write lock, wait for the write lock to be released before destructing std::shared_ptr copysetNode = nullptr; - /* 加写锁 */ + /*Write lock*/ WriteLockGuard writeLockGuard(rwLock_); if (copysetNodeMap_.end() == copysetNodeMap_.find(groupId)) { copysetNode = std::make_shared(logicPoolId, @@ -428,18 +428,18 @@ bool CopysetNodeManager::DeleteCopysetNode(const LogicPoolID &logicPoolId, GroupId groupId = ToGroupId(logicPoolId, copysetId); { - // 加读锁 + // Read lock ReadLockGuard readLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { - // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估 + // TODO(yyk) There may be a risk of deadlock, which needs to be evaluated in the future it->second->Fini(); ret = true; } } { - // 加写锁 + // Write lock WriteLockGuard writeLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { @@ -460,18 +460,18 @@ bool CopysetNodeManager::PurgeCopysetNodeData(const LogicPoolID &logicPoolId, GroupId groupId = ToGroupId(logicPoolId, copysetId); { - // 加读锁 + // Read lock ReadLockGuard readLockGuard(rwLock_); auto it = 
copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { - // TODO(yyk) 这部分可能存在死锁的风险,后续需要评估 + // TODO(yyk) There may be a risk of deadlock, which needs to be evaluated in the future it->second->Fini(); ret = true; } } { - // 加写锁 + // Write lock WriteLockGuard writeLockGuard(rwLock_); auto it = copysetNodeMap_.find(groupId); if (copysetNodeMap_.end() != it) { @@ -520,7 +520,7 @@ bool CopysetNodeManager::DeleteBrokenCopyset(const LogicPoolID& poolId, bool CopysetNodeManager::IsExist(const LogicPoolID &logicPoolId, const CopysetID ©setId) { - /* 加读锁 */ + /*Read lock*/ ReadLockGuard readLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); return copysetNodeMap_.end() != copysetNodeMap_.find(groupId); @@ -529,7 +529,7 @@ bool CopysetNodeManager::IsExist(const LogicPoolID &logicPoolId, bool CopysetNodeManager::InsertCopysetNodeIfNotExist( const LogicPoolID &logicPoolId, const CopysetID ©setId, std::shared_ptr node) { - /* 加写锁 */ + /*Write lock*/ WriteLockGuard writeLockGuard(rwLock_); GroupId groupId = ToGroupId(logicPoolId, copysetId); auto it = copysetNodeMap_.find(groupId); diff --git a/src/chunkserver/copyset_node_manager.h b/src/chunkserver/copyset_node_manager.h index 8294b21e0f..eea593355a 100755 --- a/src/chunkserver/copyset_node_manager.h +++ b/src/chunkserver/copyset_node_manager.h @@ -44,13 +44,13 @@ using curve::common::TaskThreadPool; class ChunkOpRequest; /** - * Copyset Node的管理者 + * Manager of Copyset Node */ class CopysetNodeManager : public curve::common::Uncopyable { public: using CopysetNodePtr = std::shared_ptr; - // 单例,仅仅在 c++11或者更高版本下正确 + // Single example, only correct in c++11 or higher versions static CopysetNodeManager &GetInstance() { static CopysetNodeManager instance; return instance; @@ -61,46 +61,46 @@ class CopysetNodeManager : public curve::common::Uncopyable { int Fini(); /** - * @brief 加载目录下的所有copyset + * @brief Load all copysets in the directory * - * @return 0表示加载成功,非0表示加载失败 + * @return 0 indicates successful loading, non 0 indicates failed loading */ int ReloadCopysets(); /** - * 创建copyset node,两种情况需要创建copyset node - * TODO(wudemiao): 后期替换之后删除掉 - * 1.集群初始化,创建copyset - * 2.恢复的时候add peer + * To create a copyset node, there are two situations where you need to create a copyset node + * TODO(wudemiao): Delete after later replacement + * 1. Cluster initialization, creating copyset + * 2. 
add peer during recovery */ bool CreateCopysetNode(const LogicPoolID &logicPoolId, const CopysetID ©setId, const Configuration &conf); /** - * 都是创建copyset,目前两个同时存在,后期仅仅保留一个 + * Both are creating copysets, currently both exist simultaneously, and only one will be retained in the future */ bool CreateCopysetNode(const LogicPoolID &logicPoolId, const CopysetID ©setId, const std::vector peers); /** - * 删除copyset node内存实例(停止copyset, 销毁copyset内存实例并从copyset - * manager的copyset表中清除copyset表项,并不影响盘上的copyset持久化数据) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @return true 成功,false失败 + * Delete the copyset node memory instance (stop copyset, destroy the copyset memory instance, and remove it from the copyset + * Clearing the copyset table entry in the manager's copyset table does not affect the persistence data of the copyset on the disk + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copy group ID + * @return true succeeded, false failed */ bool DeleteCopysetNode(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** - * 彻底删除copyset node内存数据(停止copyset, 销毁copyset内存实例并从 - * copyset manager的copyset表中清除copyset表项,并将copyset持久化数据从盘 - * 上彻底删除) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @return true 成功,false失败 + * Completely delete the copyset node's memory data (stop copyset, destroy the copyset memory instance, and remove it from the + * Clear the copyset table entries in the copyset manager's copyset table and persist the copyset data from the disk + * Completely delete on) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copy group ID + * @return true succeeded, false failed */ bool PurgeCopysetNodeData(const LogicPoolID &logicPoolId, const CopysetID ©setId); @@ -115,34 +115,34 @@ class CopysetNodeManager : public curve::common::Uncopyable { const CopysetID& copysetId); /** - * 判断指定的copyset是否存在 - * @param logicPoolId:逻辑池子id - * @param copysetId:复制组id - * @return true存在,false不存在 + * Determine whether the specified copyset exists + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copy group ID + * @return true exists, false does not exist */ bool IsExist(const LogicPoolID &logicPoolId, const CopysetID ©setId); /** - * 获取指定的copyset - * @param logicPoolId:逻辑池子id - * @param copysetId:复制组id - * @return nullptr则为没查询到 + * Get the specified copyset + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copy group ID + * @return nullptr means that no query was found */ virtual CopysetNodePtr GetCopysetNode(const LogicPoolID &logicPoolId, const CopysetID ©setId) const; /** - * 查询所有的copysets - * @param nodes:出参,返回所有的copyset + * Query all copysets + * @param nodes: Issue parameters and return all copysets */ void GetAllCopysetNodes(std::vector *nodes) const; /** - * 添加RPC service - * TODO(wudemiao): 目前仅仅用于测试,后期完善了会删除掉 - * @param server:rpc Server - * @param listenAddress:监听的地址 - * @return 0成功,-1失败 + * Add RPC service + * TODO(wudemiao): Currently only used for testing, and will be removed after later refinement + * @param server: rpc Server + * @param listenAddress: The address to listen to + * @return 0 succeeded, -1 failed */ int AddService(brpc::Server *server, const butil::EndPoint &listenAddress); @@ -160,24 +160,24 @@ class CopysetNodeManager : public curve::common::Uncopyable { } /** - * 加载copyset,包括新建一个copyset或者重启一个copyset - * @param logicPoolId: 逻辑池id + * Load copyset, including creating a new copyset or restarting a copyset + * @param logicPoolId: Logical Pool ID * @param copysetId: copyset id - * @param needCheckLoadFinished: 
是否需要判断copyset加载完成 + * @param needCheckLoadFinished: Do you need to determine if the copyset loading is complete */ void LoadCopyset(const LogicPoolID &logicPoolId, const CopysetID ©setId, bool needCheckLoadFinished); /** - * 检测指定的copyset状态,直到copyset加载完成或出现异常 - * @param node: 指定的copyset node - * @return true表示加载成功,false表示检测过程中出现异常 + * Detect the specified copyset state until the copyset load is completed or an exception occurs + * @param node: The specified copyset node + * @return true indicates successful loading, while false indicates an exception occurred during the detection process */ bool CheckCopysetUntilLoadFinished(std::shared_ptr node); /** - * 获取copysetNodeManager加载copyset的状态 - * @return false-copyset未加载完成 true-copyset已加载完成 + * Obtain the status of copysetNodeManager loading copyset + * @return false-copyset not loaded complete, true-copyset loaded complete */ virtual bool LoadFinished(); @@ -189,23 +189,23 @@ class CopysetNodeManager : public curve::common::Uncopyable { private: /** - * 如果指定copyset不存在,则将copyset插入到map当中(线程安全) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @param node:要插入的copysetnode - * @return copyset不存在,则插入到map并返回true; - * copyset如果存在,则返回false + * If the specified copyset does not exist, insert the copyset into the map (thread safe) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copy group ID + * @param node: The copysetnode to be inserted + * @return If the copyset does not exist, insert it into the map and return true; + * If copyset exists, return false */ bool InsertCopysetNodeIfNotExist(const LogicPoolID &logicPoolId, const CopysetID ©setId, std::shared_ptr node); /** - * 创建一个新的copyset或加载一个已存在的copyset(非线程安全) - * @param logicPoolId:逻辑池id - * @param copysetId:复制组id - * @param conf:此copyset的配置成员 - * @return 创建或加载成功返回copysetnode,否则返回nullptr + * Create a new copyset or load an existing copyset (non thread safe) + * @param logicPoolId: Logical Pool ID + * @param copysetId: Copy group ID + * @param conf: The configuration members of this copyset + * @return Successfully created or loaded, returns copysetnode, otherwise returns nullptr */ std::shared_ptr CreateCopysetNodeUnlocked( const LogicPoolID &logicPoolId, @@ -215,17 +215,17 @@ class CopysetNodeManager : public curve::common::Uncopyable { private: using CopysetNodeMap = std::unordered_map>; - // 保护复制组 map的读写锁 + // Protect the read write lock of the replication group map mutable BthreadRWLock rwLock_; - // 复制组map + // Copy Group Map CopysetNodeMap copysetNodeMap_; - // 复制组配置选项 + // Copy Group Configuration Options CopysetNodeOptions copysetNodeOptions_; - // 控制copyset并发启动的数量 + // Control the number of concurrent starts of copyset std::shared_ptr> copysetLoader_; - // 表示copyset node manager当前是否正在运行 + // Indicates whether the copyset node manager is currently running Atomic running_; - // 表示copyset node manager当前是否已经完成加载 + // Indicates whether the copyset node manager has currently completed loading Atomic loadFinished_; }; diff --git a/src/chunkserver/copyset_service.cpp b/src/chunkserver/copyset_service.cpp index e09516c0ad..28fe41eb3a 100755 --- a/src/chunkserver/copyset_service.cpp +++ b/src/chunkserver/copyset_service.cpp @@ -42,7 +42,7 @@ void CopysetServiceImpl::CreateCopysetNode(RpcController *controller, LOG(INFO) << "Received create copyset request: " << ToGroupIdString(request->logicpoolid(), request->copysetid()); - // 解析request中的peers + // Analyzing Peers in Request Configuration conf; for (int i = 0; i < request->peerid_size(); ++i) { PeerId peer; @@ -171,7 +171,7 @@ 
void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, LOG(INFO) << "Received GetCopysetStatus request: " << ToGroupIdString(request->logicpoolid(), request->copysetid()); - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -183,7 +183,7 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, return; } - // 获取raft node status + // Obtain raft node status NodeStatus status; nodePtr->GetStatus(&status); response->set_state(status.state); @@ -204,13 +204,13 @@ void CopysetServiceImpl::GetCopysetStatus(RpcController *controller, response->set_lastindex(status.last_index); response->set_diskindex(status.disk_index); - // 获取配置的版本 + // Obtain the version of the configuration response->set_epoch(nodePtr->GetConfEpoch()); /** - * 考虑到query hash需要读取copyset的所有chunk数据,然后计算hash值 - * 是一个非常耗时的操作,所以在request会设置query hash字段,如果 - * 为false,那么就不需要查询copyset的hash值 + * Considering that calculating the hash value for query hash requires reading all chunk data from a copyset, + * which is a very time-consuming operation, the request will have a "query hash" field. If it is set to false, + * then there is no need to query the hash value of the copyset. */ if (request->queryhash()) { std::string hash; diff --git a/src/chunkserver/copyset_service.h b/src/chunkserver/copyset_service.h index fabf6df8fc..8e6775e1f7 100755 --- a/src/chunkserver/copyset_service.h +++ b/src/chunkserver/copyset_service.h @@ -34,7 +34,7 @@ using ::google::protobuf::Closure; class CopysetNodeManager; /** - * 复制组管理的Rpc服务,目前仅有创建复制组 + * The Rpc service for replication group management currently only creates replication groups */ class CopysetServiceImpl : public CopysetService { public: @@ -43,7 +43,7 @@ class CopysetServiceImpl : public CopysetService { ~CopysetServiceImpl() {} /** - * 创建复制组,一次只能创建一个 + * Create replication groups, only one can be created at a time */ void CreateCopysetNode(RpcController *controller, const CopysetRequest *request, @@ -51,7 +51,7 @@ class CopysetServiceImpl : public CopysetService { Closure *done); /* - * 创建复制组,一次可以创建多个 + * Create replication groups, multiple can be created at once */ void CreateCopysetNode2(RpcController *controller, const CopysetRequest2 *request, @@ -72,7 +72,7 @@ class CopysetServiceImpl : public CopysetService { Closure *done); private: - // 复制组管理者 + // Copy Group Manager CopysetNodeManager* copysetNodeManager_; }; diff --git a/src/chunkserver/heartbeat.cpp b/src/chunkserver/heartbeat.cpp index 0e756b29c6..9e1d46eeab 100644 --- a/src/chunkserver/heartbeat.cpp +++ b/src/chunkserver/heartbeat.cpp @@ -68,13 +68,13 @@ int Heartbeat::Init(const HeartbeatOptions &options) { csEp_ = butil::EndPoint(csIp, options_.port); LOG(INFO) << "Chunkserver address: " << options_.ip << ":" << options_.port; - // mdsEps不能为空 + // mdsEps cannot be empty ::curve::common::SplitString(options_.mdsListenAddr, ",", &mdsEps_); if (mdsEps_.empty()) { LOG(ERROR) << "Invalid mds ip provided: " << options_.mdsListenAddr; return -1; } - // 检查每个地址的合法性 + // Check the legality of each address for (auto addr : mdsEps_) { butil::EndPoint endpt; if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { @@ -88,10 +88,10 @@ int Heartbeat::Init(const HeartbeatOptions &options) { copysetMan_ = options.copysetNodeManager; - // 初始化timer + // Initialize timer waitInterval_.Init(options_.intervalSec * 1000); - // 获取当前unix时间戳 + // Obtain the current Unix timestamp startUpTime_ = 
::curve::common::TimeUtility::GetTimeofDaySec(); // init scanManager @@ -417,13 +417,13 @@ int Heartbeat::ExecTask(const HeartbeatResponse& response) { CopysetNodePtr copyset = copysetMan_->GetCopysetNode( conf.logicalpoolid(), conf.copysetid()); - // 判断copyconf是否合法 + // Determine whether copyconf is legal if (!HeartbeatHelper::CopySetConfValid(conf, copyset)) { continue; } - // 解析该chunkserver上的copyset是否需要删除 - // 需要删除则清理copyset + // Resolve whether the copyset on the chunkserver needs to be deleted + // If deletion is required, clean the copyset if (HeartbeatHelper::NeedPurge(csEp_, conf, copyset)) { LOG(INFO) << "Clean peer " << csEp_ << " of copyset(" << conf.logicalpoolid() << "," << conf.copysetid() @@ -433,7 +433,7 @@ int Heartbeat::ExecTask(const HeartbeatResponse& response) { continue; } - // 解析是否有配置变更需要执行 + // Resolve if there are any configuration changes that need to be executed if (!conf.has_type()) { LOG(INFO) << "Failed to parse task for copyset(" << conf.logicalpoolid() << "," << conf.copysetid() @@ -442,7 +442,7 @@ int Heartbeat::ExecTask(const HeartbeatResponse& response) { continue; } - // 如果有配置变更需要执行,下发变更到copyset + // If there are configuration changes that need to be executed, issue the changes to the copyset if (!HeartbeatHelper::PeerVaild(conf.configchangeitem().address())) { continue; } @@ -458,7 +458,7 @@ int Heartbeat::ExecTask(const HeartbeatResponse& response) { continue; } - // 根据不同的变更类型下发配置 + // Distribute configurations based on different change types switch (conf.type()) { case curve::mds::heartbeat::TRANSFER_LEADER: { @@ -569,7 +569,7 @@ void Heartbeat::HeartbeatWorker() { LOG(INFO) << "Starting Heartbeat worker thread."; - // 处理配置等于0等异常情况 + // Handling abnormal situations such as configuration equal to 0 if (options_.intervalSec <= 4) { errorIntervalSec = 2; } else { diff --git a/src/chunkserver/heartbeat.h b/src/chunkserver/heartbeat.h index df86d8e88a..c410a1ed7e 100644 --- a/src/chunkserver/heartbeat.h +++ b/src/chunkserver/heartbeat.h @@ -56,7 +56,7 @@ using TaskStatus = butil::Status; using CopysetNodePtr = std::shared_ptr; /** - * 心跳子系统选项 + * Heartbeat subsystem options */ struct HeartbeatOptions { ChunkServerID chunkserverId; @@ -75,7 +75,7 @@ struct HeartbeatOptions { }; /** - * 心跳子系统处理模块 + * Heartbeat subsystem processing module */ class Heartbeat { public: @@ -83,107 +83,107 @@ class Heartbeat { ~Heartbeat() {} /** - * @brief 初始化心跳子系统 - * @param[in] options 心跳子系统选项 - * @return 0:成功,非0失败 + * @brief Initialize heartbeat subsystem + * @param[in] options Heartbeat subsystem options + * @return 0: Success, non 0 failure */ int Init(const HeartbeatOptions& options); /** - * @brief 清理心跳子系统 - * @return 0:成功,非0失败 + * @brief Clean heartbeat subsystem + * @return 0: Success, non 0 failure */ int Fini(); /** - * @brief 启动心跳子系统 - * @return 0:成功,非0失败 + * @brief: Start the heartbeat subsystem + * @return 0: Success, non 0 failure */ int Run(); private: /** - * @brief 停止心跳子系统 - * @return 0:成功,非0失败 + * @brief Stop heartbeat subsystem + * @return 0: Success, non 0 failure */ int Stop(); /* - * 心跳工作线程 + * Heartbeat Worker Thread */ void HeartbeatWorker(); /* - * 获取Chunkserver存储空间信息 + * Obtain Chunkserver storage space information */ int GetFileSystemSpaces(size_t* capacity, size_t* free); /* - * 构建心跳消息的Copyset信息项 + * Building a Copyset information item for heartbeat messages */ int BuildCopysetInfo(curve::mds::heartbeat::CopySetInfo* info, CopysetNodePtr copyset); /* - * 构建心跳请求 + * Build Heartbeat Request */ int BuildRequest(HeartbeatRequest* request); /* - * 发送心跳消息 
+ * Send heartbeat message */ int SendHeartbeat(const HeartbeatRequest& request, HeartbeatResponse* response); /* - * 执行心跳任务 + * Perform Heartbeat Tasks */ int ExecTask(const HeartbeatResponse& response); /* - * 输出心跳请求信息 + * Output heartbeat request information */ void DumpHeartbeatRequest(const HeartbeatRequest& request); /* - * 输出心跳回应信息 + * Output heartbeat response information */ void DumpHeartbeatResponse(const HeartbeatResponse& response); /* - * 清理复制组实例及持久化数据 + * Clean up replication group instances and persist data */ TaskStatus PurgeCopyset(LogicPoolID poolId, CopysetID copysetId); private: - // 心跳线程 + // Heartbeat Thread Thread hbThread_; - // 控制心跳模块运行或停止 + // Control the heartbeat module to run or stop std::atomic toStop_; - // 使用定时器 + // Using a timer ::curve::common::WaitInterval waitInterval_; - // Copyset管理模块 + // Copyset Management Module CopysetNodeManager* copysetMan_; - // ChunkServer目录 + // ChunkServer directory std::string storePath_; - // 心跳选项 + // Heartbeat Options HeartbeatOptions options_; - // MDS的地址 + // MDS address std::vector mdsEps_; - // 当前供服务的mds + // Current mds for service int inServiceIndex_; - // ChunkServer本身的地址 + // ChunkServer's own address butil::EndPoint csEp_; - // 模块初始化时间, unix时间 + // Module initialization time, unix time uint64_t startUpTime_; ScanManager *scanMan_; diff --git a/src/chunkserver/heartbeat_helper.cpp b/src/chunkserver/heartbeat_helper.cpp index 02a2fc65c9..fc6aa1fe09 100644 --- a/src/chunkserver/heartbeat_helper.cpp +++ b/src/chunkserver/heartbeat_helper.cpp @@ -32,22 +32,22 @@ namespace curve { namespace chunkserver { bool HeartbeatHelper::BuildNewPeers( const CopySetConf &conf, std::vector *newPeers) { - // 检验目标节点和待删除节点是否有效 + // Verify if the target node and the node to be deleted are valid std::string target(conf.configchangeitem().address()); std::string old(conf.oldpeer().address()); if (!PeerVaild(target) || !PeerVaild(old)) { return false; } - // 生成newPeers + // Generate newPeers for (int i = 0; i < conf.peers_size(); i++) { std::string peer = conf.peers(i).address(); - // 检验conf中的peer是否有效 + // Verify if the peer in conf is valid if (!PeerVaild(peer)) { return false; } - // newPeers中不包含old副本 + // newPeers does not contain old copies if (conf.peers(i).address() != old) { newPeers->emplace_back(conf.peers(i)); } @@ -64,7 +64,7 @@ bool HeartbeatHelper::PeerVaild(const std::string &peer) { bool HeartbeatHelper::CopySetConfValid( const CopySetConf &conf, const CopysetNodePtr ©set) { - // chunkserver中不存在需要变更的copyset, 报警 + // There is no copyset that needs to be changed in chunkserver, alarm if (copyset == nullptr) { LOG(ERROR) << "Failed to find copyset(" << conf.logicalpoolid() << "," << conf.copysetid() << "), groupId: " @@ -72,7 +72,7 @@ bool HeartbeatHelper::CopySetConfValid( return false; } - // 下发的变更epoch < copyset实际的epoch,报错 + // The issued change epoch is less than the actual epoch of the copyset, and an error is reported if (conf.epoch() < copyset->GetConfEpoch()) { LOG(WARNING) << "Config change epoch:" << conf.epoch() << " is smaller than current:" << copyset->GetConfEpoch() @@ -90,7 +90,7 @@ bool HeartbeatHelper::CopySetConfValid( bool HeartbeatHelper::NeedPurge(const butil::EndPoint &csEp, const CopySetConf &conf, const CopysetNodePtr ©set) { (void)copyset; - // CLDCFS-1004 bug-fix: mds下发epoch为0, 配置为空的copyset + // CLDCFS-1004 bug-fix: mds issued a copyset with epoch 0 and empty configuration if (0 == conf.epoch() && conf.peers().empty()) { LOG(INFO) << "Clean copyset " << ToGroupIdStr(conf.logicalpoolid(), 
conf.copysetid()) @@ -99,7 +99,7 @@ bool HeartbeatHelper::NeedPurge(const butil::EndPoint &csEp, return true; } - // 该chunkserrver不在copyset的配置中,需要清理 + // The chunkserrver is not in the configuration of the copyset and needs to be cleaned up std::string chunkserverEp = std::string(butil::endpoint2str(csEp).c_str()); for (int i = 0; i < conf.peers_size(); i++) { if (conf.peers(i).address().find(chunkserverEp) != std::string::npos) { diff --git a/src/chunkserver/heartbeat_helper.h b/src/chunkserver/heartbeat_helper.h index 43ada5f6ea..130dcd7e86 100644 --- a/src/chunkserver/heartbeat_helper.h +++ b/src/chunkserver/heartbeat_helper.h @@ -39,55 +39,55 @@ using CopysetNodePtr = std::shared_ptr; class HeartbeatHelper { public: /** - * 根据mds下发的conf构建出指定复制组的新配置,给ChangePeer使用 + * Build a new configuration for the specified replication group based on the conf issued by mds, and use it for ChangePeer * - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[out] newPeers 指定复制组的目标配置 + * @param[in] conf mds issued the change command needupdatecopyset[i] + * @param[out] newPeers specifies the target configuration for the replication group * - * @return false-生成newpeers失败 true-生成newpeers成功 + * @return false - Failed to generate newpeers, true - Successfully generated newpeers */ static bool BuildNewPeers( const CopySetConf &conf, std::vector *newPeers); /** - * 判断字符串peer(正确的形式为: ip:port:0)是否有效 + * Determine whether the string peer (correct form: ip:port:0) is valid * - * @param[in] peer 指定字符串 + * @param[in] peer specifies the string * - * @return false-无效 true-有效 + * @return false - invalid, true - valid */ static bool PeerVaild(const std::string &peer); /** - * 判断mds下发过来的copysetConf是否合法,以下两种情况不合法: - * 1. chunkserver中不存在该copyset - * 2. mds下发的copyset中记录的epoch小于chunkserver上copyset此时的epoch + * Determine whether the copysetConf sent by mds is legal, and the following two situations are illegal: + * 1. The copyset does not exist in chunkserver + * 2. 
The epoch recorded in the copyset issued by mds is smaller than the epoch recorded in the copyset on chunkserver at this time * - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[in] copyset chunkserver上对应的copyset + * @param[in] conf mds issued the change command needupdatecopyset[i] + * @param[in] copyset The corresponding copyset on chunkserver * - * @return false-copysetConf不合法,true-copysetConf合法 + * @return false-copysetConf is illegal, true-copysetConf is legal */ static bool CopySetConfValid( const CopySetConf &conf, const CopysetNodePtr ©set); /** - * 判断chunkserver(csEp)中指定copyset是否需要删除 + * Determine whether the specified copyset in chunkserver(csEp) needs to be deleted * - * @param[in] csEp 该chunkserver的ip:port - * @param[in] conf mds下发的变更命令needupdatecopyset[i] - * @param[in] copyset chunkserver上对应的copyset + * @param[in] csEp The ip:port of this chunkserver + * @param[in] conf mds issued the change command needupdatecopyset[i] + * @param[in] copyset The corresponding copyset on chunkserver * - * @return false-该chunkserver上的copyset无需清理; - * true-该chunkserver上的copyset需要清理 + * @return false-The copyset on the chunkserver does not need to be cleaned; + * true-The copyset on this chunkserver needs to be cleaned up */ static bool NeedPurge(const butil::EndPoint &csEp, const CopySetConf &conf, const CopysetNodePtr ©set); /** - * 判断指定chunkserver copyset是否已经加载完毕 + * Determine whether the specified chunkserver copyset has been loaded completely * - * @return false-copyset加载完毕 true-copyset未加载完成 + * @return false-copyset loading completed, true-copyset not loaded completed */ static bool ChunkServerLoadCopySetFin(const std::string ipPort); }; diff --git a/src/chunkserver/inflight_throttle.h b/src/chunkserver/inflight_throttle.h index 86af93daf7..3830976030 100644 --- a/src/chunkserver/inflight_throttle.h +++ b/src/chunkserver/inflight_throttle.h @@ -30,7 +30,7 @@ namespace curve { namespace chunkserver { /** - * 负责控制最大inflight request数量 + * Responsible for controlling the maximum number of inflight requests */ class InflightThrottle { public: @@ -40,8 +40,8 @@ class InflightThrottle { virtual ~InflightThrottle() = default; /** - * @brief: 判断是否过载 - * @return true,过载,false没有过载 + * @brief: Determine if there is an overload + * @return true, overload, false No overload */ inline bool IsOverLoad() { if (kMaxInflightRequest_ >= @@ -53,23 +53,23 @@ class InflightThrottle { } /** - * @brief: inflight request计数加1 + * @brief: inflight request count plus 1 */ inline void Increment() { inflightRequestCount_.fetch_add(1, std::memory_order_relaxed); } /** - * @brief: inflight request计数减1 + * @brief: inflight request count minus 1 */ inline void Decrement() { inflightRequestCount_.fetch_sub(1, std::memory_order_relaxed); } private: - // 当前inflight request数量 + // Current number of inflight requests std::atomic inflightRequestCount_; - // 最大的inflight request数量 + // Maximum number of inflight requests const uint64_t kMaxInflightRequest_; }; diff --git a/src/chunkserver/op_request.cpp b/src/chunkserver/op_request.cpp index 817e65c79f..1dac786af2 100755 --- a/src/chunkserver/op_request.cpp +++ b/src/chunkserver/op_request.cpp @@ -71,8 +71,8 @@ void ChunkOpRequest::Process() { } /** - * 如果propose成功,说明request成功交给了raft处理, - * 那么done_就不能被调用,只有propose失败了才需要提前返回 + * If the proposal is successful, it indicates that the request has been successfully handed over to the raft for processing, + * So, done_ cannot be called, only if the proposal fails, it needs to be returned in advance */ if (0 == 
Propose(request_, cntl_ ? &cntl_->request_attachment() : nullptr)) { @@ -82,7 +82,7 @@ void ChunkOpRequest::Process() { int ChunkOpRequest::Propose(const ChunkRequest *request, const butil::IOBuf *data) { - // 打包op request为task + // Pack op request as task braft::Task task; butil::IOBuf log; if (0 != Encode(request, data, &log)) { @@ -93,10 +93,10 @@ int ChunkOpRequest::Propose(const ChunkRequest *request, task.data = &log; task.done = new ChunkClosure(shared_from_this()); /** - * 由于apply是异步的,有可能某个节点在term1是leader,apply了一条log, - * 但是中间发生了主从切换,在很短的时间内这个节点又变为term3的leader, - * 之前apply的日志才开始进行处理,这种情况下要实现严格意义上的复制状态 - * 机,需要解决这种ABA问题,可以在apply的时候设置leader当时的term + * Due to the asynchronous nature of the application, it is possible that a node in term1 is a leader and has applied a log, + * But there was a master-slave switch in the middle, and in a short period of time, this node became the leader of term3 again, + * Previously applied logs were only processed, in which case strict replication status needs to be implemented + * To solve this ABA problem, you can set the term of the leader at the time of application */ task.expected_term = node_->LeaderTerm(); @@ -106,8 +106,8 @@ int ChunkOpRequest::Propose(const ChunkRequest *request, } void ChunkOpRequest::RedirectChunkRequest() { - // 编译时加上 --copt -DUSE_BTHREAD_MUTEX - // 否则可能发生死锁: CLDCFS-1120 + // Compile with --copt -DUSE_BTHREAD_MUTEX + // Otherwise, a deadlock may occur: CLDCFS-1120 // PeerId leader = node_->GetLeaderId(); // if (!leader.is_empty()) { // response_->set_redirect(leader.to_string()); @@ -221,7 +221,7 @@ void DeleteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request + // NOTE: Prioritize the use of datastore/request passed in as parameters during processing auto ret = datastore->DeleteChunk(request.chunkid(), request.sn()); if (CSErrorCode::Success == ret) @@ -299,7 +299,7 @@ void ReadChunkRequest::Process() { void ReadChunkRequest::OnApply(uint64_t index, ::google::protobuf::Closure *done) { - // 先清除response中的status,以保证CheckForward后的判断的正确性 + // Clear the status in the response first to ensure the correctness of the judgment after CheckForward response_->clear_status(); CSChunkInfo chunkInfo; @@ -307,7 +307,7 @@ void ReadChunkRequest::OnApply(uint64_t index, &chunkInfo); do { bool needLazyClone = false; - // 如果需要Read的chunk不存在,但是请求包含Clone源信息,则尝试从Clone源读取数据 + // If the chunk that needs to be read does not exist, but the request contains Clone source information, try reading data from the Clone source if (CSErrorCode::ChunkNotExistError == errorCode) { if (existCloneInfo(request_)) { needLazyClone = true; @@ -324,14 +324,14 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); break; } - // 如果需要从源端拷贝数据,需要将请求转发给clone manager处理 + // If you need to copy data from the source, you need to forward the request to the clone manager for processing if ( needLazyClone || NeedClone(chunkInfo) ) { applyIndex = index; std::shared_ptr cloneTask = cloneMgr_->GenerateCloneTask( std::dynamic_pointer_cast(shared_from_this()), done); - // TODO(yyk) 尽量不能阻塞队列,后面要具体考虑 + // TODO(yyk) should try not to block the queue, and specific considerations should be taken later bool result = cloneMgr_->IssueCloneTask(cloneTask); if (!result) { LOG(ERROR) << "issue clone task failed: " @@ -340,14 +340,14 @@ void ReadChunkRequest::OnApply(uint64_t index, CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); 
break; } - // 如果请求成功转发给了clone manager就可以直接返回了 + // If the request is successfully forwarded to the clone manager, it can be returned directly return; } - // 如果是ReadChunk请求还需要从本地读取数据 + // If it is a ReadChunk request, data needs to be read locally if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_READ) { ReadChunk(); } - // 如果是recover请求,说明请求区域已经被写过了,可以直接返回成功 + // If it is a recover request, it indicates that the request area has been written and can directly return success if (request_->optype() == CHUNK_OP_TYPE::CHUNK_OP_RECOVER) { response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } @@ -367,19 +367,19 @@ void ReadChunkRequest::OnApplyFromLog(std::shared_ptr datastore, (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters during processing + // Read doesn't need to do anything } bool ReadChunkRequest::NeedClone(const CSChunkInfo& chunkInfo) { - // 如果不是 clone chunk,就不需要拷贝 + // If it's not a clone chunk, there's no need to copy it if (chunkInfo.isClone) { off_t offset = request_->offset(); size_t length = request_->size(); uint32_t blockSize = chunkInfo.blockSize; uint32_t beginIndex = offset / blockSize; uint32_t endIndex = (offset + length - 1) / blockSize; - // 如果是clone chunk,且存在未被写过的page,就需要拷贝 + // If it is a clone chunk and there are unwritten pages, it needs to be copied if (chunkInfo.bitmap->NextClearBit(beginIndex, endIndex) != Bitmap::NO_POS) { return true; @@ -450,8 +450,8 @@ void WriteChunkRequest::OnApply(uint64_t index, response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); node_->UpdateAppliedIndex(index); } else if (CSErrorCode::BackwardRequestError == ret) { - // 打快照那一刻是有可能出现旧版本的请求 - // 返回错误给客户端,让客户端带新版本来重试 + // At the moment of taking a snapshot, there may be requests for older versions + // Return an error to the client and ask them to try again with the new version of the original LOG(WARNING) << "write failed: " << " data store return: " << ret << ", request: " << request_->ShortDebugString(); @@ -461,9 +461,9 @@ void WriteChunkRequest::OnApply(uint64_t index, CSErrorCode::CrcCheckError == ret || CSErrorCode::FileFormatError == ret) { /** - * internalerror一般是磁盘错误,为了防止副本不一致,让进程退出 - * TODO(yyk): 当前遇到write错误直接fatal退出整个 - * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性 + * An internal error is usually a disk error. To prevent inconsistent replicas, the process is forced to exit + * TODO(yyk): Currently encountering a write error, directly fatally exit the entire process + * ChunkServer will consider only flagging this copyset in the later stage to ensure good availability */ LOG(FATAL) << "write failed: " << " data store return: " << ret @@ -483,7 +483,7 @@ void WriteChunkRequest::OnApply(uint64_t index, void WriteChunkRequest::OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) { - // NOTE: 处理过程中优先使用参数传入的datastore/request + // NOTE: Prioritize the use of datastore/request passed in as parameters during processing uint32_t cost; std::string cloneSourceLocation; if (existCloneInfo(&request)) { @@ -536,7 +536,7 @@ void ReadSnapshotRequest::OnApply(uint64_t index, do { /** - * 1.成功 + * 1. 
Success */ if (CSErrorCode::Success == ret) { cntl_->response_attachment().append(wrapper); @@ -548,7 +548,7 @@ void ReadSnapshotRequest::OnApply(uint64_t index, * 2.chunk not exist */ if (CSErrorCode::ChunkNotExistError == ret) { - response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); //NOLINT + response_->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); // NOLINT break; } /** @@ -560,7 +560,7 @@ void ReadSnapshotRequest::OnApply(uint64_t index, << ", request: " << request_->ShortDebugString(); } /** - * 4.其他错误 + * 4. Other errors */ LOG(ERROR) << "read snapshot failed: " << " data store return: " << ret @@ -578,8 +578,8 @@ void ReadSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, (void)datastore; (void)request; (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request - // read什么都不用做 + // NOTE: Prioritize the use of datastore/request passed in as parameters during processing + // Read doesn't need to do anything } void DeleteSnapshotRequest::OnApply(uint64_t index, @@ -611,11 +611,11 @@ void DeleteSnapshotRequest::OnApply(uint64_t index, response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT +void DeleteSnapshotRequest::OnApplyFromLog(std::shared_ptr datastore, // NOLINT const ChunkRequest &request, const butil::IOBuf &data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request + // NOTE: Prioritize the use of datastore/request passed in as parameters during processing auto ret = datastore->DeleteSnapshotChunkOrCorrectSn( request.chunkid(), request.correctedsn()); if (CSErrorCode::Success == ret) { @@ -652,8 +652,8 @@ void CreateCloneChunkRequest::OnApply(uint64_t index, CSErrorCode::CrcCheckError == ret || CSErrorCode::FileFormatError == ret) { /** - * TODO(yyk): 当前遇到createclonechunk错误直接fatal退出整个 - * ChunkServer后期考虑仅仅标坏这个copyset,保证较好的可用性 + * TODO(yyk): Currently encountering the createclonechunk error, directly fatally exit the entire process + * ChunkServer will consider only flagging this copyset in the later stage to ensure good availability */ LOG(FATAL) << "create clone failed: " << ", request: " << request_->ShortDebugString(); @@ -674,11 +674,11 @@ void CreateCloneChunkRequest::OnApply(uint64_t index, response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void CreateCloneChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT +void CreateCloneChunkRequest::OnApplyFromLog(std::shared_ptr datastore, // NOLINT const ChunkRequest &request, const butil::IOBuf &data) { (void)data; - // NOTE: 处理过程中优先使用参数传入的datastore/request + // NOTE: Prioritize the use of datastore/request passed in as parameters during processing auto ret = datastore->CreateCloneChunk(request.chunkid(), request.sn(), request.correctedsn(), @@ -714,8 +714,8 @@ void PasteChunkInternalRequest::Process() { } /** - * 如果propose成功,说明request成功交给了raft处理, - * 那么done_就不能被调用,只有propose失败了才需要提前返回 + * If the proposal is successful, it indicates that the request has been successfully handed over to the raft for processing, + * So, done_ cannot be called, only if the proposal fails, it needs to be returned in advance */ if (0 == Propose(request_, &data_)) { doneGuard.release(); @@ -727,7 +727,7 @@ void PasteChunkInternalRequest::OnApply(uint64_t index, brpc::ClosureGuard doneGuard(done); auto ret = datastore_->PasteChunk(request_->chunkid(), - data_.to_string().c_str(), //NOLINT + data_.to_string().c_str(), // NOLINT request_->offset(), request_->size()); @@ -746,10 +746,10 @@ void 
PasteChunkInternalRequest::OnApply(uint64_t index, response_->set_appliedindex(MaxAppliedIndex(node_, index)); } -void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT +void PasteChunkInternalRequest::OnApplyFromLog(std::shared_ptr datastore, // NOLINT const ChunkRequest &request, const butil::IOBuf &data) { - // NOTE: 处理过程中优先使用参数传入的datastore/request + // NOTE: Prioritize the use of datastore/request passed in as parameters during processing auto ret = datastore->PasteChunk(request.chunkid(), data.to_string().c_str(), request.offset(), @@ -819,7 +819,7 @@ void ScanChunkRequest::OnApply(uint64_t index, } } -void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, //NOLINT +void ScanChunkRequest::OnApplyFromLog(std::shared_ptr datastore, // NOLINT const ChunkRequest &request, const butil::IOBuf &data) { (void)data; diff --git a/src/chunkserver/op_request.h b/src/chunkserver/op_request.h index c29484f79b..4dee671f95 100755 --- a/src/chunkserver/op_request.h +++ b/src/chunkserver/op_request.h @@ -72,84 +72,84 @@ class ChunkOpRequest : public std::enable_shared_from_this { virtual ~ChunkOpRequest() = default; /** - * 处理request,实际上是Propose给相应的copyset + * Processing a request actually involves proposing to the corresponding copyset */ virtual void Process(); /** - * request正常情况从内存中获取上下文on apply逻辑 - * @param index:此op log entry的index - * @param done:对应的ChunkClosure + * request normally obtains context on apply logic from memory + * @param index: The index of this op log entry + * @param done: corresponding ChunkClosure */ virtual void OnApply(uint64_t index, ::google::protobuf::Closure *done) = 0; /** - * NOTE: 子类实现过程中优先使用参数传入的datastore/request - * 从log entry反序列之后得到request详细信息进行处理,request - * 相关的上下文和依赖的data store都是从参数传递进去的 - * 1.重启回放日志,从磁盘读取op log entry然后执行on apply逻辑 - * 2. follower执行on apply的逻辑 - * @param datastore:chunk数据持久化层 - * @param request:反序列化后得到的request 细信息 - * @param data:反序列化后得到的request要处理的数据 + * NOTE: During subclass implementation, prioritize the use of datastore/request passed in as parameters + * Obtain detailed request information from the reverse sequence of the log entry for processing, request + * The relevant context and dependent data store are passed in from parameters + * 1. Restart the replay log, read the op log entry from the disk, and then execute the on apply logic + * 2. follower execute the logic of on apply + * @param datastore: chunk data persistence layer + * @param request: The detailed request information obtained after deserialization + * @param data: The data to be processed by the request obtained after deserialization */ virtual void OnApplyFromLog(std::shared_ptr datastore, const ChunkRequest &request, const butil::IOBuf &data) = 0; /** - * 返回request的done成员 + * Return the done member of the request */ ::google::protobuf::Closure *Closure() { return done_; } /** - * 返回chunk id + * Return chunk id */ ChunkID ChunkId() { return request_->chunkid(); } /** - * 返回请求类型 + * Return request type */ CHUNK_OP_TYPE OpType() { return request_->optype(); } /** - * 返回请求大小 + * Return request size */ uint32_t RequestSize() { return request_->size(); } /** - * 转发request给leader + * Forward request to leader */ virtual void RedirectChunkRequest(); public: /** - * Op序列化工具函数 + * Op Serialization Tool Function * | data | * | op meta | op data | * | op request length | op request | * | 32 bit | .... 
| - * 各个字段解释如下: - * data: encode之后的数据,实际上就是一条op log entry的data - * op meta: 就是op的元数据,这里是op request部分的长度 - * op data: 就是request通过protobuf序列化后的数据 - * @param request:Chunk Request - * @param data:请求中包含的数据内容 - * @param log:出参,存放序列化好的数据,用户自己保证data!=nullptr - * @return 0成功,-1失败 + * The fields are explained as follows: + * data: the encoded bytes, i.e. the data of one op log entry + * op meta: the metadata of the op, here the length of the op request section + * op data: the op request serialized with protobuf + * @param request: Chunk Request + * @param data: The data content contained in the request + * @param log: output parameter that stores the serialized data; the caller guarantees data != nullptr + * @return 0 on success, -1 on failure */ static int Encode(const ChunkRequest *request, const butil::IOBuf *data, butil::IOBuf *log); /** - * 反序列化,从log entry得到ChunkOpRequest,当前反序列出的ChunkRequest和data - * 都会从出参传出去,而不会放在ChunkOpRequest的成员变量里面 - * @param log:op log entry - * @param request: 出参,存放反序列上下文 - * @param data:出参,op操作的数据 - * @return nullptr,失败,否则返回相应的ChunkOpRequest + * Deserialization: obtain a ChunkOpRequest from a log entry; the deserialized ChunkRequest and data + * are passed out through the output parameters rather than being stored in ChunkOpRequest's member variables + * @param log: op log entry + * @param request: output parameter that stores the deserialized context + * @param data: output parameter, the data of the op operation + * @return nullptr on failure, otherwise the corresponding ChunkOpRequest */ static std::shared_ptr Decode(butil::IOBuf log, ChunkRequest *request, @@ -161,24 +161,24 @@ class ChunkOpRequest : public std::enable_shared_from_this { protected: /** - * 打包request为braft::task,propose给相应的复制组 - * @param request:Chunk Request - * @param data:请求中包含的数据内容 - * @return 0成功,-1失败 + * Package the request as a braft::task and propose it to the corresponding replication group + * @param request: Chunk Request + * @param data: The data content contained in the request + * @return 0 on success, -1 on failure */ int Propose(const ChunkRequest *request, const butil::IOBuf *data); protected: - // chunk持久化接口 + // Chunk persistence interface std::shared_ptr datastore_; - // 复制组 + // Replication group std::shared_ptr node_; // rpc controller brpc::Controller *cntl_; - // rpc 请求 + // rpc request const ChunkRequest *request_; - // rpc 返回 + // rpc response ChunkResponse *response_; // rpc done closure ::google::protobuf::Closure *done_; @@ -233,16 +233,16 @@ class ReadChunkRequest : public ChunkOpRequest { } private: - // 根据chunk信息判断是否需要拷贝数据 + // Determine whether data needs to be copied based on the chunk information bool NeedClone(const CSChunkInfo& chunkInfo); - // 从chunk文件中读数据 + // Read data from the chunk file void ReadChunk(); private: CloneManager* cloneMgr_; - // 并发模块 + // Concurrency module ConcurrentApplyModule* concurrentApplyModule_; - // 保存 apply index + // Saved apply index uint64_t applyIndex; }; diff --git a/src/chunkserver/passive_getfn.h b/src/chunkserver/passive_getfn.h index ac6655d1b2..8029f31696 100644 --- a/src/chunkserver/passive_getfn.h +++ b/src/chunkserver/passive_getfn.h @@ -32,8 +32,8 @@ namespace curve { namespace chunkserver { /** - * 获取datastore中chunk文件的数量 - * @param arg: datastore的对象指针 + * Obtain the number of chunk files in the datastore + * @param arg: Object pointer to the datastore */ uint32_t GetDatastoreChunkCountFunc(void* arg); /** @@ -42,17 +42,17 @@ namespace chunkserver { */ uint32_t GetLogStorageWalSegmentCountFunc(void* arg); 
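(Editorial aside, not part of this patch: the passive getters declared in this header all share one shape, a plain uint32_t(void*) callback that receives an opaque object pointer and is polled by the metric layer on demand. A minimal self-contained sketch of that pattern follows; the names are hypothetical stand-ins for the real datastore and metric wiring.)

    #include <cstdint>
    #include <iostream>

    struct FakeDatastore {           // hypothetical stand-in for the real datastore object
        uint32_t chunkCount = 42;
    };

    // Same shape as GetDatastoreChunkCountFunc(void* arg): cast the opaque arg back
    // to the object it points to and return the current value.
    uint32_t GetChunkCountFunc(void* arg) {
        auto* ds = static_cast<FakeDatastore*>(arg);
        return ds == nullptr ? 0 : ds->chunkCount;
    }

    int main() {
        FakeDatastore ds;
        uint32_t (*getfn)(void*) = GetChunkCountFunc;  // a passive metric would keep this pair
        void* arg = &ds;
        std::cout << getfn(arg) << std::endl;          // polled on demand, prints 42
    }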
/** - * 获取datastore中快照chunk的数量 - * @param arg: datastore的对象指针 + * Obtain the number of snapshot chunks in the datastore + * @param arg: Object pointer to datastore */ uint32_t GetDatastoreSnapshotCountFunc(void* arg); /** - * 获取datastore中clone chunk的数量 - * @param arg: datastore的对象指针 + * Obtain the number of clone chunks in the datastore + * @param arg: Object pointer to datastore */ uint32_t GetDatastoreCloneChunkCountFunc(void* arg); /** - * 获取chunkserver上chunk文件的数量 + * Obtain the number of chunk files on the chunkserver * @param arg: nullptr */ uint32_t GetTotalChunkCountFunc(void* arg); @@ -63,28 +63,28 @@ namespace chunkserver { uint32_t GetTotalWalSegmentCountFunc(void* arg); /** - * 获取chunkserver上快照chunk的数量 + * Obtain the number of snapshot chunks on the chunkserver * @param arg: nullptr */ uint32_t GetTotalSnapshotCountFunc(void* arg); /** - * 获取chunkserver上clone chunk的数量 + * Obtain the number of clone chunks on the chunkserver * @param arg: nullptr */ uint32_t GetTotalCloneChunkCountFunc(void* arg); /** - * 获取chunkfilepool中剩余chunk的数量 - * @param arg: chunkfilepool的对象指针 + * Obtain the number of remaining chunks in the chunkfilepool + * @param arg: Object pointer to chunkfilepool */ uint32_t GetChunkLeftFunc(void* arg); /** - * 获取walfilepool中剩余chunk的数量 - * @param arg: walfilepool的对象指针 + * Obtain the number of remaining chunks in the walfilepool + * @param arg: Object pointer to walfilepool */ uint32_t GetWalSegmentLeftFunc(void* arg); /** - * 获取trash中chunk的数量 - * @param arg: trash的对象指针 + * Obtain the number of chunks in the trash + * @param arg: Object pointer to trash */ uint32_t GetChunkTrashedFunc(void* arg); diff --git a/src/chunkserver/raftsnapshot/curve_file_adaptor.h b/src/chunkserver/raftsnapshot/curve_file_adaptor.h index 2f6b23ec0b..b64ad12c91 100644 --- a/src/chunkserver/raftsnapshot/curve_file_adaptor.h +++ b/src/chunkserver/raftsnapshot/curve_file_adaptor.h @@ -31,7 +31,7 @@ namespace chunkserver { class CurveFileAdaptor : public braft::PosixFileAdaptor { public: explicit CurveFileAdaptor(int fd) : PosixFileAdaptor(fd) {} - // close之前必须先sync,保证数据落盘,其他逻辑不变 + // Before closing, you must first synchronize to ensure that the data is dropped and other logic remains unchanged bool close() override { return sync() && braft::PosixFileAdaptor::close(); } diff --git a/src/chunkserver/raftsnapshot/curve_file_service.cpp b/src/chunkserver/raftsnapshot/curve_file_service.cpp index f1d5d931e0..808886f6fe 100644 --- a/src/chunkserver/raftsnapshot/curve_file_service.cpp +++ b/src/chunkserver/raftsnapshot/curve_file_service.cpp @@ -63,9 +63,9 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, if (iter == _reader_map.end()) { lck.unlock(); /** - * 为了和文件不存在的错误区分开来,且考虑到install snapshot - * 的uri format为:remote://ip:port/reader_id,所以使用ENXIO - * 代表reader id不存在的错误 + * In order to distinguish between the error of a non-existent file + * and considering that the uri format for installing a snapshot is: remote://ip:port/reader_id, + * ENXIO is used to represent the error of a non-existent reader id. */ cntl->SetFailed(ENXIO, "Fail to find reader=%" PRId64, request->reader_id()); @@ -88,10 +88,10 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, butil::IOBuf buf; bool is_eof = false; size_t read_count = 0; - // 1. 如果是read attch meta file + // 1. 
If it is a read attach meta file if (request->filename() == BRAFT_SNAPSHOT_ATTACH_META_FILE) { - // 如果没有设置snapshot attachment,那么read文件的长度为零 - // 表示没有 snapshot attachment文件列表 + // If no snapshot attachment is set, then the length of the read file is zero, + // indicating that there are no snapshot attachment files in the list. bool snapshotAttachmentExist = false; { std::unique_lock lck(_mutex); @@ -104,7 +104,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, } } if (snapshotAttachmentExist) { - // 否则获取snapshot attachment file list + // Otherwise, obtain the snapshot attachment file list std::vector files; _snapshot_attachment->list_attach_files(&files, reader->path()); CurveSnapshotAttachMetaTable attachMetaTable; @@ -135,7 +135,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, } if (0 != attachMetaTable.save_to_iobuf_as_remote(&buf)) { - // 内部错误: EINTERNAL + // Internal error: EINTERNAL LOG(ERROR) << "Fail to serialize " "LocalSnapshotAttachMetaTable as iobuf"; cntl->SetFailed(brpc::EINTERNAL, @@ -149,7 +149,7 @@ void CurveFileService::get_file(::google::protobuf::RpcController* controller, read_count = buf.size(); } } else { - // 2. 否则其它文件下载继续走raft原先的文件下载流程 + // 2. Otherwise, the download of other files will continue to follow the original file download process of Raft const int rc = reader->read_file( &buf, request->filename(), request->offset(), request->count(), diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp index 18479b26a6..8642507c83 100644 --- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp +++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.cpp @@ -69,18 +69,18 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path, if (cloexec && !local_s_support_cloexec_on_open) { oflag &= (~O_CLOEXEC); } - // Open就使用sync标志是为了避免集中在close一次性sync,对于16MB的chunk文件可能会造成抖动 + // The use of the sync flag in Open is to avoid focusing on the close one-time sync, which may cause jitter for 16MB chunk files oflag |= O_SYNC; - // 先判断当前文件是否需要过滤,如果需要过滤,就直接走下面逻辑,不走chunkfilepool - // 如果open操作携带create标志,则从chunkfilepool取,否则保持原来语意 - // 如果待打开的文件已经存在,则直接使用原有语意 + // First, determine whether the current file needs to be filtered. If it needs to be filtered, simply follow the following logic instead of chunkfilepool + // If the open operation carries the create flag, it will be taken from chunkfilepool, otherwise it will maintain its original meaning + // If the file to be opened already exists, use the original meaning directly if (!NeedFilter(path) && (oflag & O_CREAT) && false == lfs_->FileExists(path)) { - // 从chunkfile pool中取出chunk返回 + // Removing a chunk from the chunkfile pool returns int rc = chunkFilePool_->GetFile(path, tempMetaPageContent); - // 如果从FilePool中取失败,返回错误。 + // If retrieving from FilePool fails, an error is returned. if (rc != 0) { LOG(ERROR) << "get chunk from chunkfile pool failed!"; return NULL; @@ -116,9 +116,9 @@ braft::FileAdaptor* CurveFilesystemAdaptor::open(const std::string& path, bool CurveFilesystemAdaptor::delete_file(const std::string& path, bool recursive) { - // 1. 如果是目录且recursive=true,那么遍历目录内容回收 - // 2. 如果是目录且recursive=false,那么判断目录内容是否为空,不为空返回false - // 3. 如果是文件直接回收 + // 1. If it is a directory and recursive=true, then traverse the directory content to recycle + // 2. 
If it is a directory and recursive=false, then determine whether the directory content is empty, and return false if it is not empty + // 3. If the file is directly recycled if (lfs_->DirExists(path)) { std::vector dircontent; lfs_->List(path, &dircontent); @@ -130,11 +130,11 @@ bool CurveFilesystemAdaptor::delete_file(const std::string& path, } } else { if (lfs_->FileExists(path)) { - // 如果在过滤名单里,就直接删除 + // If it is on the filtering list, delete it directly if (NeedFilter(path)) { return lfs_->Delete(path) == 0; } else { - // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除 + // The chunkfilepool will internally check the legality of the corresponding path file, and if it does not match, it will be deleted directly return chunkFilePool_->RecycleFile(path) == 0; } } @@ -152,7 +152,7 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive( if (lfs_->DirExists(todeletePath)) { RecycleDirRecursive(todeletePath); } else { - // 如果在过滤名单里,就直接删除 + // If it is on the filtering list, delete it directly if (NeedFilter(todeletePath)) { if (lfs_->Delete(todeletePath) != 0) { LOG(ERROR) << "delete " << todeletePath << ", failed!"; @@ -175,7 +175,7 @@ bool CurveFilesystemAdaptor::RecycleDirRecursive( bool CurveFilesystemAdaptor::rename(const std::string& old_path, const std::string& new_path) { if (!NeedFilter(new_path) && lfs_->FileExists(new_path)) { - // chunkfilepool内部会检查path对应文件合法性,如果不符合就直接删除 + // The chunkfilepool will internally check the legality of the corresponding path file, and if it does not match, it will be deleted directly chunkFilePool_->RecycleFile(new_path); } return lfs_->Rename(old_path, new_path) == 0; diff --git a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h index 4e6737b8d4..b43c49e033 100644 --- a/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h +++ b/src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h @@ -33,14 +33,14 @@ #include "src/chunkserver/raftsnapshot/curve_file_adaptor.h" /** - * RaftSnapshotFilesystemAdaptor目的是为了接管braft - * 内部snapshot创建chunk文件的逻辑,目前curve内部 - * 会从chunkfilepool中直接取出已经格式化好的chunk文件 - * 但是braft内部由于install snapshot也会创建chunk文件 - * 这个创建文件不感知chunkfilepool,因此我们希望install - * snapshot也能从chunkfilepool中直接取出chunk文件,因此 - * 我们对install snapshot流程中的文件系统做了一层hook,在 - * 创建及删除文件操作上直接使用curve提供的文件系统接口即可。 + * The purpose of RaftSnapshotFilesystemAdaptor is to take over the logic of creating chunk files + * for internal snapshots in braft. Currently, within Curve, we directly retrieve pre-formatted chunk files + * from the chunk file pool. However, within braft, + * the creation of chunk files during an install snapshot + * process does not interact with the chunk file pool. Therefore, we want the install snapshot process + * to also be able to retrieve chunk files directly from the chunk file pool. To achieve this, + * we have implemented a hook in the file system operations within the install snapshot process. + * This hook allows us to use the file system interface provided by Curve for file creation and deletion. */ using curve::fs::LocalFileSystem; @@ -49,18 +49,18 @@ using curve::chunkserver::FilePool; namespace curve { namespace chunkserver { /** - * CurveFilesystemAdaptor继承raft的PosixFileSystemAdaptor类,在raft - * 内部其快照使用PosixFileSystemAdaptor类进行文件操作,因为我们只希望在其创建文件 - * 或者删除文件的时候使用chunkfilepool提供的getchunk和recyclechunk接口,所以这里 - * 我们只实现了open和delete_file两个接口。其他接口在调用的时候仍然使用原来raft的内部 - * 的接口。 + * CurveFilesystemAdaptor inherits from Raft's PosixFileSystemAdaptor class. 
Within the Raft framework, + * it uses the PosixFileSystemAdaptor class for file operations during snapshots. However, we only want + * to use the `getchunk` and `recyclechunk` interfaces provided by the chunkfilepool when creating or + * deleting files. Therefore, in this context, we have only implemented the `open` and `delete_file` interfaces. + * Other interfaces are still used with the original internal Raft interfaces when called. */ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { public: /** - * 构造函数 - * @param: chunkfilepool用于获取和回收chunk文件 - * @param: lfs用于进行一些文件操作,比如打开或者删除目录 + * Constructor + * @param: chunkfilepool is used to retrieve and recycle chunk files + * @param: lfs is used for some file operations, such as opening or deleting directories */ CurveFilesystemAdaptor(std::shared_ptr filePool, std::shared_ptr lfs); @@ -68,14 +68,14 @@ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { virtual ~CurveFilesystemAdaptor(); /** - * 打开文件,在raft内部使用open来创建一个文件,并返回FileAdaptor结构 - * @param: path是当前待打开的路径 - * @param: oflag为打开文件参数 - * @param: file_meta是当前文件的meta信息,这个参数内部未使用 - * @param: e为打开文件是的错误码 - * @return: FileAdaptor是raft内部封装fd的一个类,fd是open打开path的返回值 - * 后续所有对于该文件的读写都是通过该FileAdaptor指针进行的,其内部封装了 - * 读写操作,其内部定义如下。 + * Open the file, use open inside the raft to create a file, and return the FileAdaptor structure + * @param: path is the current path to be opened + * @param: oflag is the parameter for opening a file + * @param: file_meta is the meta information of the current file, which is not used internally + * @param: e is the error code for opening the file + * @return: FileAdaptor is a class within Raft that encapsulates a file descriptor (fd). After opening a path with the `open` call, + * all subsequent read and write operations on that file are performed through a pointer to this FileAdaptor class. + * It internally defines the following operations: * class PosixFileAdaptor : public FileAdaptor { * friend class PosixFileSystemAdaptor; * public: @@ -98,57 +98,57 @@ class CurveFilesystemAdaptor : public braft::PosixFileSystemAdaptor { const ::google::protobuf::Message* file_meta, butil::File::Error* e); /** - * 删除path对应的文件或目录 - * @param: path是待删除的文件路径 - * @param: recursive是否递归删除 - * @return: 成功返回true,否则返回false + * Delete the file or directory corresponding to the path + * @param: path is the file path to be deleted + * @param: Recursive whether to recursively delete + * @return: Successfully returns true, otherwise returns false */ virtual bool delete_file(const std::string& path, bool recursive); - /** - * rename到新路径 - * 为什么要重载rename? - * 由于raft内部使用的是本地文件系统的rename,如果目标new path - * 已经存在文件,那么就会覆盖该文件。这样raft内部会创建temp_snapshot_meta - * 文件,这个是为了保证原子修改snapshot_meta文件而设置的,然后通过rename保证 - * 修改snapshot_meta文件修改的原子性。如果这个temp_snapshot_meta是从chunkfilpool - * 取的,那么如果直接rename,这个temp_snapshot_meta文件所占用的chunk文件 - * 就永远收不回来了,这种情况下会消耗大量的预分配chunk,所以这里重载rename,先 - * 回收new path,然后再rename, - * @param: old_path旧文件路径 - * @param: new_path新文件路径 - */ + /** + * Rename to a new path. + * Why override the rename function? + * Raft internally uses the rename function of the local file system. If the target new path already exists as a file, it will overwrite that file. This behavior leads to the creation of a + * 'temp_snapshot_meta' file, which is set up to ensure the atomic modification of the 'snapshot_meta' file. + * Using rename helps ensure the atomicity of modifying the 'snapshot_meta' file. 
However, if the 'temp_snapshot_meta' + * file is allocated from the chunk file pool and renamed directly, the chunk file used by the 'temp_snapshot_meta' + * file will never be released. In this situation, a significant number of pre-allocated chunks can be consumed. + * Therefore, the rename function is overridden here to first release the resources associated with the new path, + * and then perform the rename operation. + * @param: old_path - The old file path + * @param: new_path - The new file path + */ virtual bool rename(const std::string& old_path, const std::string& new_path); - // 设置过滤哪些文件,这些文件不从chunkfilepool取 - // 回收的时候也直接删除这些文件,不进入chunkfilepool + // Set which files to filter and do not retrieve them from chunkfilepool + // Delete these files directly during recycling without entering the chunkfilepool void SetFilterList(const std::vector& filter); private: /** - * 递归回收目录内容 - * @param: path为待回收的目录路径 - * @return: 成功返回true,否则返回false + * Recursive recycling of directory content + * @param: path is the directory path to be recycled + * @return: Successfully returns true, otherwise returns false */ bool RecycleDirRecursive(const std::string& path); /** - * 查看文件是否需要过滤 + * Check if the file needs to be filtered */ bool NeedFilter(const std::string& filename); private: - // 由于chunkfile pool获取新的chunk时需要传入metapage信息 - // 这里创建一个临时的metapage,其内容无关紧要,因为快照会覆盖这部分内容 + // Due to the need to pass in metapage information when obtaining new chunks in the chunkfile pool + // Create a temporary metapage here, whose content is irrelevant as the snapshot will overwrite this part of the content char* tempMetaPageContent; - // 我们自己的文件系统,这里文件系统会做一些打开及删除目录操作 + // Our own file system, where the file system performs some opening and deleting directory operations std::shared_ptr lfs_; - // 操作chunkfilepool的指针,这个FilePool_与copysetnode的 - // chunkfilepool_应该是全局唯一的,保证操作chunkfilepool的原子性 + // Pointer to operate chunkfilepool, this FilePool_ Related to copysetnode + // Chunkfilepool_ It should be globally unique, ensuring the atomicity of the chunkfilepool operation std::shared_ptr chunkFilePool_; - // 过滤名单,在当前vector中的文件名,都不从chunkfilepool中取文件 - // 回收的时候也直接删除这些文件,不进入chunkfilepool + // Filter the list and do not retrieve file names from chunkfilepool in the current vector + // Delete these files directly during recycling without entering the chunkfilepool std::vector filterList_; }; } // namespace chunkserver diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp index 93d4a7c324..1f00a3d7d8 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp +++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.cpp @@ -43,15 +43,15 @@ void CurveSnapshotAttachment::list_attach_files( std::vector snapFiles; int rc = fileHelper_.ListFiles(dataDir, nullptr, &snapFiles); - // list出错一般认为就是磁盘出现问题了,这种情况直接让进程挂掉 - // Attention: 这里还需要更仔细考虑 + // An error in the list is generally believed to be due to a disk issue, which directly causes the process to crash + // Attention: More careful consideration is needed here CHECK(rc == 0) << "List dir failed."; files->clear(); - // 文件路径格式与snapshot_meta中的格式要相同 + // File path format and the format in snapshot_meta should be the same for (const auto& snapFile : snapFiles) { std::string snapApath; - // 添加绝对路径 + // Add absolute path snapApath.append(dataDir); snapApath.append("/").append(snapFile); std::string filePath = curve::common::CalcRelativePath( diff --git 
a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h index 10e2172673..bc361194d6 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h +++ b/src/chunkserver/raftsnapshot/curve_snapshot_attachment.h @@ -34,8 +34,8 @@ namespace curve { namespace chunkserver { /** - * 用于获取snapshot attachment files的接口,一般用于一些下载 - * 快照获取需要额外下载的文件list + * Interface for obtaining snapshot attachment files, typically used to get the list of + * extra files that need to be downloaded in addition when downloading a snapshot */ class SnapshotAttachment : public butil::RefCountedThreadSafe { @@ -44,24 +44,24 @@ class SnapshotAttachment : virtual ~SnapshotAttachment() = default; /** - * 获取snapshot attachment文件列表 - * @param files[out]: attachment文件列表 - * @param snapshotPath[in]: braft快照的路径 + * Obtain the list of snapshot attachment files + * @param files[out]: attachment file list + * @param snapshotPath[in]: Path to the braft snapshot */ virtual void list_attach_files(std::vector *files, const std::string& raftSnapshotPath) = 0; }; -// SnapshotAttachment接口的实现,用于raft加载快照时,获取chunk快照文件列表 +// Implementation of the SnapshotAttachment interface, used to obtain the list of chunk snapshot files when raft loads a snapshot class CurveSnapshotAttachment : public SnapshotAttachment { public: explicit CurveSnapshotAttachment(std::shared_ptr fs); virtual ~CurveSnapshotAttachment() = default; /** - * 获取raft snapshot的attachment,这里就是获取chunk的快照文件列表 - * @param files[out]: data目录下的chunk快照文件列表 - * @param raftSnapshotPath: braft快照的路径 - * 返回的文件路径使用 绝对路径:相对路径 的格式,相对路径包含data目录 + * Obtain the attachment of the raft snapshot, i.e. the list of chunk snapshot files + * @param files[out]: List of chunk snapshot files in the data directory + * @param raftSnapshotPath: Path to the braft snapshot + * The returned file paths use the format absolute_path:relative_path, where the relative path includes the data directory */ void list_attach_files(std::vector *files, const std::string& raftSnapshotPath) override; @@ -70,12 +70,12 @@ class CurveSnapshotAttachment : public SnapshotAttachment { }; /* -* @brif 通过具体的某个raft的snapshot实例地址获取raft实例基础地址 -* @param[in] specificSnapshotDir 某个具体snapshot的目录 - 比如/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/ -* @param[in] raftSnapshotRelativeDir 上层业务指的所有snapshot的相对基地址 - 比如raft_snapshot -* @return 返回raft实例的绝对基地址,/data/chunkserver1/copysets/4294967812/ +* @brief Obtains the base directory of a raft instance from the path of one of its concrete snapshot instances +* @param[in] specificSnapshotDir The directory of a specific snapshot, + for example /data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/ +* @param[in] raftSnapshotRelativeDir The relative base directory of all snapshots as referred to by the upper-layer business, + for example raft_snapshot +* @return The absolute base directory of the raft instance, e.g. /data/chunkserver1/copysets/4294967812/ */ inline std::string getCurveRaftBaseDir(std::string specificSnapshotDir, std::string raftSnapshotRelativeDir) { diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp index 6a996695bd..b54bbc1c2b 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp +++ b/src/chunkserver/raftsnapshot/curve_snapshot_copier.cpp @@ -71,7 +71,7 @@ void *CurveSnapshotCopier::start_copy(void* arg) { void CurveSnapshotCopier::copy() { do { - // 下载snapshot meta中记录的文件 + // Download the files 
recorded in the snapshot meta load_meta_table(); if (!ok()) { break; } @@ -86,7 +86,7 @@ void CurveSnapshotCopier::copy() { copy_file(files[i]); } - // 下载snapshot attachment文件 + // Download the snapshot attachment files load_attach_meta_table(); if (!ok()) { break; } @@ -169,7 +169,7 @@ void CurveSnapshotCopier::load_attach_meta_table() { return; } - // 如果attach meta table为空,那么说明没有snapshot attachment files + // If the attach meta table is empty, it means there are no snapshot attachment files if (0 == meta_buf.size()) { return; } @@ -355,7 +355,7 @@ void CurveSnapshotCopier::copy_file(const std::string& filename, bool attch) { _cur_session = NULL; lck.unlock(); if (!session->status().ok()) { - // 如果是文件不存在,那么删除刚开始open的文件 + // If the file does not exist, delete the file that was opened at the beginning if (session->status().error_code() == ENOENT) { bool rc = _fs->delete_file(file_path, false); if (!rc) { @@ -371,7 +371,7 @@ session->status().error_cstr()); return; } - // 如果是attach file,那么不需要持久化file meta信息 + // If it is an attach file, there is no need to persist the file meta information if (!attch && _writer->add_file(filename, &meta) != 0) { set_error(EIO, "Fail to add file to writer"); return; diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h index 1c991720b0..6948e6e3be 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_copier.h +++ b/src/chunkserver/raftsnapshot/curve_snapshot_copier.h @@ -75,7 +75,7 @@ class CurveSnapshotCopier : public braft::SnapshotCopier { braft::SnapshotReader* last_snapshot); void filter(); void copy_file(const std::string& filename, bool attach = false); - // 这里的filename是相对于快照目录的路径,为了先把文件下载到临时目录,需要把前面的..去掉 + // The filename here is a path relative to the snapshot directory; to download the file into the temporary directory first, the leading ".." needs to be stripped std::string get_rfilename(const std::string& filename); braft::raft_mutex_t _mutex; diff --git a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h index 97c553661c..5402361292 100644 --- a/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h +++ b/src/chunkserver/raftsnapshot/curve_snapshot_file_reader.h @@ -55,9 +55,8 @@ namespace curve { namespace chunkserver { /** - * snapshot attachment文件元数据表,同上面的 - * CurveSnapshotAttachMetaTable接口,主要提供attach文件元数据信息 - * 的查询、序列化和反序列等接口 + * Snapshot attachment file metadata table, similar to the CurveSnapshotAttachMetaTable interface above; + * it mainly provides interfaces for querying, serializing, and deserializing attachment file metadata */ class CurveSnapshotAttachMetaTable { public: diff --git a/src/chunkserver/raftsnapshot/define.h b/src/chunkserver/raftsnapshot/define.h index 012da7f1ba..de4e0a101a 100644 --- a/src/chunkserver/raftsnapshot/define.h +++ b/src/chunkserver/raftsnapshot/define.h @@ -29,8 +29,8 @@ namespace chunkserver { const char RAFT_DATA_DIR[] = "data"; const char RAFT_META_DIR[] = "raft_meta"; -// TODO(all:fix it): RAFT_SNAP_DIR注意当前这个目录地址不能修改 -// 与当前外部依赖curve-braft代码强耦合(两边硬编码耦合) +// TODO(all:fix it): Note that the RAFT_SNAP_DIR directory address must not be modified at this time. +// It is tightly coupled with the current external dependency on curve-braft code (hardcoded coupling on both sides). 
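(Editorial aside, not part of this patch: the RAFT_SNAP_DIR value mentioned in the TODO above, "raft_snapshot", is also the raftSnapshotRelativeDir that getCurveRaftBaseDir(), documented earlier in curve_snapshot_attachment.h, uses to derive a copyset's base directory from a concrete snapshot path by truncating at the relative snapshot dir. A minimal standalone sketch of that contract, reusing the example path from the comment; the names are illustrative, not the patch's implementation.)

    #include <iostream>
    #include <string>

    // Given ".../copysets/4294967812/raft_snapshot/snapshot_805455/" and the relative
    // dir "raft_snapshot", return ".../copysets/4294967812/".
    std::string RaftBaseDirOf(const std::string& specificSnapshotDir,
                              const std::string& raftSnapshotRelativeDir) {
        std::string::size_type pos = specificSnapshotDir.find(raftSnapshotRelativeDir);
        if (pos == std::string::npos) return "";
        return specificSnapshotDir.substr(0, pos);
    }

    int main() {
        std::cout << RaftBaseDirOf(
            "/data/chunkserver1/copysets/4294967812/raft_snapshot/snapshot_805455/",
            "raft_snapshot") << std::endl;  // prints /data/chunkserver1/copysets/4294967812/
    }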
const char RAFT_SNAP_DIR[] = "raft_snapshot"; const char RAFT_LOG_DIR[] = "log"; #define BRAFT_SNAPSHOT_PATTERN "snapshot_%020" PRId64 diff --git a/src/chunkserver/register.cpp b/src/chunkserver/register.cpp index 1616800c55..7ffaa5d82e 100644 --- a/src/chunkserver/register.cpp +++ b/src/chunkserver/register.cpp @@ -40,9 +40,9 @@ namespace chunkserver { Register::Register(const RegisterOptions &ops) { this->ops_ = ops; - // 解析mds的多个地址 + // Parsing multiple addresses of mds ::curve::common::SplitString(ops.mdsListenAddr, ",", &mdsEps_); - // 检验每个地址的合法性 + // Verify the legality of each address for (auto addr : mdsEps_) { butil::EndPoint endpt; if (butil::str2endpoint(addr.c_str(), &endpt) < 0) { @@ -105,7 +105,7 @@ int Register::RegisterToMDS(const ChunkServerMetadata *localMetadata, curve::mds::topology::TopologyService_Stub stub(&channel); stub.RegistChunkServer(&cntl, &req, &resp, nullptr); - // TODO(lixiaocui): 后续错误码和mds共享后改成枚举类型 + // TODO(lixiaocui): Change to enumeration type after sharing error codes and mds in the future if (!cntl.Failed() && resp.statuscode() == 0) { break; } else { diff --git a/src/chunkserver/register.h b/src/chunkserver/register.h index f89683087d..861e89313a 100644 --- a/src/chunkserver/register.h +++ b/src/chunkserver/register.h @@ -37,7 +37,7 @@ namespace curve { namespace chunkserver { const uint32_t CURRENT_METADATA_VERSION = 0x01; -// register配置选项 +// Register Configuration Options struct RegisterOptions { std::string mdsListenAddr; std::string chunkserverInternalIp; @@ -76,7 +76,7 @@ class Register { const std::shared_ptr &epochMap); /** - * @brief 持久化ChunkServer元数据 + * @brief Persisting ChunkServer metadata * * @param[in] metadata */ diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 2941261240..d75022c8a7 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -60,13 +60,13 @@ int Trash::Init(TrashOptions options) { walPool_ = options.walPool; chunkNum_.store(0); - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; localFileSystem_->List(trashPath_, &files); - // 遍历trash下的文件 + // Traverse through files under trash for (auto &file : files) { - // 如果不是copyset目录,跳过 + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -101,7 +101,7 @@ int Trash::Fini() { } int Trash::RecycleCopySet(const std::string &dirPath) { - // 回收站目录不存在,需要创建 + // The recycle bin directory does not exist and needs to be created if (!localFileSystem_->DirExists(trashPath_)) { LOG(INFO) << "Copyset recyler directory " << trashPath_ << " does not exist, creating it"; @@ -113,7 +113,7 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } } - // 如果回收站已存在该目录,本次删除失败 + // If the directory already exists in the recycle bin, this deletion failed std::string dst = trashPath_ + "/" + dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + '.' 
+ std::to_string(std::time(nullptr)); @@ -138,27 +138,27 @@ int Trash::RecycleCopySet(const std::string &dirPath) { void Trash::DeleteEligibleFileInTrashInterval() { while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { - // 扫描回收站 + // Scan Recycle Bin DeleteEligibleFileInTrash(); } } void Trash::DeleteEligibleFileInTrash() { - // trash目录暂不存在 + // The trash directory does not currently exist if (!localFileSystem_->DirExists(trashPath_)) { return; } - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; if (0 != localFileSystem_->List(trashPath_, &files)) { LOG(ERROR) << "Trash failed list files in " << trashPath_; return; } - // 遍历trash下的文件 + // Traverse through files under trash for (auto &file : files) { - // 如果不是copyset目录,跳过 + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -172,7 +172,7 @@ void Trash::DeleteEligibleFileInTrash() { continue; } - // 删除copyset目录 + // Delete copyset directory if (0 != localFileSystem_->Delete(copysetDir)) { LOG(ERROR) << "Trash fail to delete " << copysetDir; return; @@ -181,9 +181,9 @@ void Trash::DeleteEligibleFileInTrash() { } bool Trash::IsCopysetInTrash(const std::string &dirName) { - // 合法的copyset目录: 高32位PoolId(>0)组成, 低32位由copysetId(>0)组成 - // 目录是十进制形式 - // 例如:2860448220024 (poolId: 666, copysetId: 888) + // Legal copyset directory: composed of high 32-bit PoolId(>0), and low 32-bit composed of copysetId(>0) + // The directory is in decimal form + // For example: 2860448220024 (poolId: 666, copysetId: 888) uint64_t groupId; auto n = dirName.find("."); if (n == std::string::npos) { @@ -227,7 +227,7 @@ bool Trash::IsChunkOrSnapShotFile(const std::string &chunkName) { bool Trash::RecycleChunksAndWALInDir( const std::string ©setPath, const std::string &filename) { bool isDir = localFileSystem_->DirExists(copysetPath); - // 是文件看是否需要回收 + // Is it a file to see if it needs to be recycled if (!isDir) { if (IsChunkOrSnapShotFile(filename)) { return RecycleChunkfile(copysetPath, filename); @@ -238,18 +238,18 @@ bool Trash::RecycleChunksAndWALInDir( } } - // 是目录,继续list + // It's a directory, continue with the list std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; return false; } - // 遍历子文件 + // Traverse sub files bool ret = true; for (auto &file : files) { std::string filePath = copysetPath + "/" + file; - // recycle 失败不应该中断其他文件的recycle + // recycle failure should not interrupt the recycle of other files if (!RecycleChunksAndWALInDir(filePath, file)) { ret = false; } diff --git a/src/chunkserver/trash.h b/src/chunkserver/trash.h index ff037db8a4..2ca51b6104 100644 --- a/src/chunkserver/trash.h +++ b/src/chunkserver/trash.h @@ -39,11 +39,11 @@ using ::curve::common::InterruptibleSleeper; namespace curve { namespace chunkserver { struct TrashOptions{ - // copyset的trash路径 + // The trash path of copyset std::string trashPath; - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 + // The file can be physically recycled after being placed in trash for expiredAfteSec seconds int expiredAfterSec; - // 扫描trash目录的时间间隔 + // Time interval for scanning the trash directory int scanPeriodSec; std::shared_ptr localFileSystem; @@ -60,50 +60,50 @@ class Trash { int Fini(); /* - * @brief DeleteEligibleFileInTrash 回收trash目录下的物理空间 + * @brief DeleteEligibleFileInTrash recycles the physical space in the trash directory */ void DeleteEligibleFileInTrash(); int RecycleCopySet(const std::string &dirPath); /* - * @brief 
获取回收站中chunk的个数 + * @brief Get the number of chunks in the recycle bin * - * @return chunk个数 + * @return Number of chunks */ uint32_t GetChunkNum() {return chunkNum_.load();} private: /* - * @brief DeleteEligibleFileInTrashInterval 每隔一段时间进行trash物理空间回收 + * @brief DeleteEligibleFileInTrashInterval Trash physical space recycling at regular intervals */ void DeleteEligibleFileInTrashInterval(); /* - * @brief NeedDelete 文件是否需要删除,放入trash的时间大于 - * trash中expiredAfterSec可以删除 + * @brief NeedDelete Does the file need to be deleted, and the time it takes to place the trash is greater than + * ExpiredAfterSec in trash can be deleted * - * @param[in] copysetDir copyset的目录路径 + * @param[in] copysetDir copyset directory path * - * @return true-可以被删除 + * @return true - can be deleted */ bool NeedDelete(const std::string ©setDir); /* - * @brief IsCopysetInTrash 是否为回收站中的copyset的目录 + * @brief IsCopysetInTrash Is the directory of the copyset in the recycle bin * - * @param[in] dirName 文目录路径 + * @param[in] dirName directory path * - * @return true-符合copyset目录命名规则 + * @return true - Complies with copyset directory naming rules */ bool IsCopysetInTrash(const std::string &dirName); /* - * @brief IsChunkOrSnapShotFile 是否为chunk或snapshot文件 + * @brief IsChunkOrSnapShotFile Is a chunk or snapshot file * - * @param[in] chunkName 文件名 + * @param[in] chunkName file name * - * @return true-符合chunk或snapshot文件命名规则 + * @return true-Complies with chunk or snapshot file naming rules */ bool IsChunkOrSnapShotFile(const std::string &chunkName); @@ -119,8 +119,8 @@ class Trash { /* * @brief Recycle Chunkfile * - * @param[in] filepath 文件路径 - * @param[in] filename 文件名 + * @param[in] filepath file path + * @param[in] filename File name */ bool RecycleChunkfile( const std::string &filepath, const std::string &filename); @@ -149,41 +149,41 @@ class Trash { bool IsWALFile(const std::string &fileName); /* - * @brief 统计copyset目录中的chunk个数 + * @brief counts the number of chunks in the copyset directory * - * @param[in] copysetPath chunk所在目录 - * @return 返回chunk个数 + * @param[in] copysetPath chunk directory + * @return returns the number of chunks */ uint32_t CountChunkNumInCopyset(const std::string ©setPath); private: - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 + // The file can be physically recycled after being placed in trash for expiredAfterSec seconds int expiredAfterSec_; - // 扫描trash目录的时间间隔 + // Time interval for scanning the trash directory int scanPeriodSec_; - // 回收站中chunk的个数 + // Number of chunks in the Recycle Bin Atomic chunkNum_; Mutex mtx_; - // 本地文件系统 + // Local File System std::shared_ptr localFileSystem_; - // chunk池子 + // chunk Pool std::shared_ptr chunkFilePool_; // wal pool std::shared_ptr walPool_; - // 回收站全路径 + // Recycle Bin Full Path std::string trashPath_; - // 后台清理回收站的线程 + // Thread for background cleaning of the recycle bin Thread recycleThread_; - // false-开始后台任务,true-停止后台任务 + // false-Start background task, true-Stop background task Atomic isStop_; InterruptibleSleeper sleeper_; diff --git a/src/client/chunk_closure.cpp b/src/client/chunk_closure.cpp index d1b8c02a47..7a74e5aedb 100644 --- a/src/client/chunk_closure.cpp +++ b/src/client/chunk_closure.cpp @@ -34,7 +34,7 @@ #include "src/client/service_helper.h" #include "src/client/io_tracker.h" -// TODO(tongguangxun) :优化重试逻辑,将重试逻辑与RPC返回逻辑拆开 +// TODO(tongguangxun): Optimize retry logic by separating the retry logic from the RPC return logic namespace curve { namespace client { @@ -44,25 +44,25 @@ FailureRequestOption ClientClosure::failReqOpt_; void 
ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { RequestClosure* reqDone = static_cast(done_); - // 如果对应的cooysetId leader可能发生变更 - // 那么设置这次重试请求超时时间为默认值 - // 这是为了尽快重试这次请求 - // 从copysetleader迁移到client GetLeader获取到新的leader会有1~2s的延迟 - // 对于一个请求来说,GetLeader仍然可能返回旧的Leader - // rpc timeout时间可能会被设置成2s/4s,等到超时后再去获取leader信息 - // 为了尽快在新的Leader上重试请求,将rpc timeout时间设置为默认值 + // If the leader of the corresponding copysetId may have changed, + // set the timeout of this retried request to the default value. + // This is done so that the request is retried as soon as possible. + // After the copyset leader migrates, it takes about 1~2s before GetLeader on the client returns the new leader, + // so for a given request GetLeader may still return the old leader. + // The rpc timeout may have been backed off to 2s/4s, and the leader info would only be fetched after that timeout expires. + // To retry the request on the new leader as soon as possible, reset the rpc timeout to the default value. if (cntlstatus == brpc::ERPCTIMEDOUT || cntlstatus == ETIMEDOUT) { uint64_t nextTimeout = 0; uint64_t retriedTimes = reqDone->GetRetriedTimes(); bool leaderMayChange = metaCache_->IsLeaderMayChange( chunkIdInfo_.lpid_, chunkIdInfo_.cpid_); - // 当某一个IO重试超过一定次数后,超时时间一定进行指数退避 - // 当底层chunkserver压力大时,可能也会触发unstable - // 由于copyset leader may change,会导致请求超时时间设置为默认值 - // 而chunkserver在这个时间内处理不了,导致IO hang - // 真正宕机的情况下,请求重试一定次数后会处理完成 - // 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 + // Once an IO has been retried more than a certain number of times, its timeout must go through exponential backoff. + // When the underlying chunkserver is under heavy pressure, unstable may also be triggered. + // Because the copyset leader may change, the request timeout would be reset to the default value, + // but the chunkserver cannot process the request within that time, causing IO to hang. + // In a real crash, the request completes after a certain number of retries. + // If it keeps being retried, it is not a crash, and the timeout should still go through the exponential backoff logic. if (retriedTimes < failReqOpt_.chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT leaderMayChange) { nextTimeout = failReqOpt_.chunkserverRPCTimeoutMS; @@ -153,10 +153,10 @@ uint64_t ClientClosure::TimeoutBackOff(uint64_t currentRetryTimes) { return nextTimeout; } -// 统一请求回调函数入口 -// 整体处理逻辑与之前相同 -// 针对不同的请求类型和返回状态码,进行相应的处理 -// 各子类需要实现SendRetryRequest,进行重试请求 +// Unified entry of the request callback function +// The overall processing logic is the same as before +// Handle each request type and response status code accordingly +// Each subclass needs to implement SendRetryRequest to send its retried requests void ClientClosure::Run() { std::unique_ptr selfGuard(this); std::unique_ptr cntlGuard(cntl_); @@ -176,42 +176,42 @@ void ClientClosure::Run() { needRetry = true; OnRpcFailed(); } else { - // 只要rpc正常返回,就清空超时计数器 + // As long as RPC returns normally, clear the timeout counter metaCache_->GetUnstableHelper().ClearTimeout( chunkserverID_, chunkserverEndPoint_); status_ = GetResponseStatus(); switch (status_) { - // 1. 请求成功 + // 1.
Request successful case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: OnSuccess(); break; - // 2.1 不是leader + // 2.1 Not the leader case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: MetricHelper::IncremRedirectRPCCount(fileMetric_, reqCtx_->optype_); needRetry = true; OnRedirected(); break; - // 2.2 Copyset不存在,大概率都是配置变更了 + // 2.2 Copyset does not exist, most likely due to configuration changes case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: needRetry = true; OnCopysetNotExist(); break; - // 2.3 chunk not exist,直接返回,不用重试 + // 2.3 Chunk not exist, return directly without retrying case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: OnChunkNotExist(); break; - // 2.4 非法参数,直接返回,不用重试 + // 2.4 Invalid parameter, return directly without retrying case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: OnInvalidRequest(); break; - // 2.5 返回backward + // 2.5 Backward returned case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: if (reqCtx_->optype_ == OpType::WRITE) { needRetry = true; @@ -229,7 +229,7 @@ void ClientClosure::Run() { } break; - // 2.6 返回chunk exist,直接返回,不用重试 + // 2.6 Chunk exist returned, return directly without retrying case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: OnChunkExist(); break; @@ -264,9 +264,9 @@ void ClientClosure::OnRpcFailed() { status_ = cntl_->ErrorCode(); - // 如果连接失败,再等一定时间再重试 + // If the connection fails, wait for a certain amount of time before retrying if (cntlstatus_ == brpc::ERPCTIMEDOUT) { - // 如果RPC超时, 对应的chunkserver超时请求次数+1 + // If the RPC timed out, increment the timeout request count of the corresponding chunkserver metaCache_->GetUnstableHelper().IncreTimeout(chunkserverID_); MetricHelper::IncremTimeOutRPCCount(fileMetric_, reqCtx_->optype_); } @@ -443,8 +443,8 @@ void ClientClosure::RefreshLeader() { << ", IO id = " << reqDone_->GetIOTracker()->GetID() << ", request id = " << reqCtx_->id_; } else { - // 如果refresh leader获取到了新的leader信息 - // 则重试之前不进行睡眠 + // If refreshing the leader obtained new leader information, + // do not sleep before retrying retryDirectly_ = (leaderId != chunkserverID_); } } diff --git a/src/client/chunk_closure.h b/src/client/chunk_closure.h index f5d9acd220..e839b65b97 100644 --- a/src/client/chunk_closure.h +++ b/src/client/chunk_closure.h @@ -49,8 +49,8 @@ class MetaCache; class CopysetClient; /** - * ClientClosure,负责保存Rpc上下文, - * 包含cntl和response已经重试次数 + * ClientClosure, responsible for saving the Rpc context, + * including the cntl, the response, and the retry count */ class ClientClosure : public Closure { public: @@ -83,43 +83,43 @@ class ClientClosure : public Closure { return chunkserverEndPoint_; } - // 统一Run函数入口 + // Unified entry of the Run function void Run() override; - // 重试请求 + // Retry the request void OnRetry(); - // Rpc Failed 处理函数 + // Handler for Rpc failures void OnRpcFailed(); - // 返回成功 处理函数 + // Handler for a successful response virtual void OnSuccess(); - // 返回重定向 处理函数 + // Handler for a redirected response virtual void OnRedirected(); - // copyset不存在 + // copyset does not exist void OnCopysetNotExist(); - // 返回backward + // Handler for a backward response void OnBackward(); - // 返回chunk不存在 处理函数 + // Handler for a chunk-not-exist response virtual void OnChunkNotExist(); - // 返回chunk存在 处理函数 + // Handler for a chunk-exist response void OnChunkExist(); // handle epoch too old void OnEpochTooOld(); - // 非法参数 + // Invalid parameter void OnInvalidRequest(); - // 发送重试请求 + // Send the retry request virtual void SendRetryRequest() = 0; - // 获取response返回的状态码 + // Obtain the status code returned by the response virtual CHUNK_OP_STATUS
GetResponseStatus() const { return response_->status(); } @@ -142,7 +142,7 @@ class ClientClosure : public Closure { return done_; } - // 测试使用,设置closure + // Test usage, set closure void SetClosure(Closure* done) { done_ = done; } @@ -152,25 +152,25 @@ class ClientClosure : public Closure { } /** - * 在重试之前根据返回值进行预处理 - * 场景1: rpc timeout,那么这时候会指数增加当前rpc的超时时间,然后直接进行重试 - * 场景2:底层OVERLOAD,那么需要在重试之前睡眠一段时间,睡眠时间根据重试次数指数增长 - * @param: rpcstatue为rpc返回值 - * @param: cntlstatus为本次rpc controller返回值 + * Preprocess based on the return value before retrying + * Scenario 1: rpc timeout, which will exponentially increase the current rpc timeout and then directly retry + * Scenario 2: Underlying Overload, then it is necessary to sleep for a period of time before retrying, and the sleep time increases exponentially based on the number of retries + * @param: rpcstatue returns the value for rpc + * @param: cntlstatus is the return value of this rpc controller */ void PreProcessBeforeRetry(int rpcstatue, int cntlstatus); /** - * 底层chunkserver overload之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回当前的需要睡眠的时间 + * After the underlying chunkserver overload, it is necessary to backoff based on the number of retries + * @param: currentRetryTimes is the current number of retries + * @return: Returns the current time required for sleep */ static uint64_t OverLoadBackOff(uint64_t currentRetryTimes); /** - * rpc timeout之后需要根据重试次数进行退避 - * @param: currentRetryTimes为当前已重试的次数 - * @return: 返回下一次RPC 超时时间 + * After the rpc timeout, it is necessary to backoff based on the number of retries + * @param: currentRetryTimes is the current number of retries + * @return: Returns the next RPC timeout time */ static uint64_t TimeoutBackOff(uint64_t currentRetryTimes); @@ -213,25 +213,25 @@ class ClientClosure : public Closure { std::unique_ptr response_; CopysetClient* client_; Closure* done_; - // 这里保存chunkserverID,是为了区别当前这个rpc是发给哪个chunkserver的 - // 这样方便在rpc closure里直接找到,当前是哪个chunkserver返回的失败 + // The chunkserverID is saved here to distinguish which chunkserver the current rpc is sent to + // This makes it easy to directly find which chunkserver is currently returning the failure in the rpc closure ChunkServerID chunkserverID_; butil::EndPoint chunkserverEndPoint_; - // 记录当前请求的相关信息 + // Record relevant information for the current request MetaCache* metaCache_; RequestClosure* reqDone_; FileMetric* fileMetric_; RequestContext* reqCtx_; ChunkIDInfo chunkIdInfo_; - // 发送重试请求前是否睡眠 + // Whether to sleep before sending a retry request bool retryDirectly_ = false; - // response 状态码 + // response status code int status_; - // rpc 状态码 + // rpc status code int cntlstatus_; }; diff --git a/src/client/client_common.h b/src/client/client_common.h index 8620f050d1..8bb4b479c7 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -53,7 +53,7 @@ constexpr uint64_t KiB = 1024; constexpr uint64_t MiB = 1024 * KiB; constexpr uint64_t GiB = 1024 * MiB; -// 操作类型 +// Operation type enum class OpType { READ = 0, WRITE, @@ -67,7 +67,7 @@ enum class OpType { }; /** - * 与nameserver.proto中的FileStatus一一对应 + * Corresponds one-to-one with FileStatus in nameserver.proto */ enum class FileStatus { Created = 0, @@ -95,7 +95,7 @@ typedef struct ChunkIDInfo { } } ChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct ChunkInfoDetail { std::vector chunkSn; } ChunkInfoDetail_t; @@ -106,7 +106,7 @@ typedef struct LeaseSession { uint64_t createTime; } LeaseSession_t; -// 
保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to the segment in the logicalpool typedef struct LogicalPoolCopysetIDInfo { LogicPoolID lpid; std::vector cpidVec; @@ -117,7 +117,7 @@ typedef struct LogicalPoolCopysetIDInfo { } } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +//Save basic information for each segment typedef struct SegmentInfo { uint32_t segmentsize; uint32_t chunksize; @@ -147,9 +147,9 @@ typedef struct FInfo { uint64_t length; uint64_t ctime; uint64_t seqnum; - // userinfo是当前操作这个文件的用户信息 + // userinfo is the user information currently operating on this file UserInfo_t userinfo; - // owner是当前文件所属信息 + // owner is the information to which the current file belongs std::string owner; std::string filename; std::string fullPathName; @@ -187,10 +187,10 @@ typedef struct FileEpoch { } } FileEpoch_t; -// PeerAddr 代表一个copyset group里的一个chunkserver节点 -// 与braft中的PeerID对应 +// PeerAddr represents a chunkserver node in a copyset group +// Corresponds to PeerID in braft struct PeerAddr { - // 节点的地址信息 + // Address information of nodes EndPoint addr_; PeerAddr() = default; @@ -201,13 +201,13 @@ struct PeerAddr { addr_.socket_file.empty(); } - // 重置当前地址信息 + // Reset current address information void Reset() { addr_.ip = butil::IP_ANY; addr_.port = 0; } - // 从字符串中将地址信息解析出来 + // Parse address information from a string int Parse(const std::string &str) { int idx; char ip_str[64]; @@ -224,8 +224,8 @@ struct PeerAddr { return 0; } - // 将该节点地址信息转化为字符串形式 - // 在get leader调用中可以将该值直接传入request + // Convert the node address information into a string format + // In the get leader call, this value can be directly passed into the request std::string ToString() const { char str[128]; snprintf(str, sizeof(str), "%s:%d", butil::endpoint2str(addr_).c_str(), diff --git a/src/client/client_metric.h b/src/client/client_metric.h index 396494f6ab..bb0a467d2b 100644 --- a/src/client/client_metric.h +++ b/src/client/client_metric.h @@ -41,20 +41,20 @@ inline void GetStringValue(std::ostream& os, void* arg) { os << *static_cast(arg); } -// 悬挂IO统计,文件级别统计,方便定位 +//Suspend IO statistics and file level statistics for easy positioning struct IOSuspendMetric { - // 当前persecond计数总数 + //Current total number of second counts bvar::Adder count; IOSuspendMetric(const std::string& prefix, const std::string& name) : count(prefix, name + "_total_count") {} }; -// 秒级信息统计 +//Second-level information statistics struct PerSecondMetric { - // 当前persecond计数总数 + //Current total number of second counts bvar::Adder count; - // persecond真实数据,这个数据依赖于count + //persecond real data depends on the count bvar::PerSecond> value; PerSecondMetric(const std::string& prefix, const std::string& name) @@ -62,21 +62,21 @@ struct PerSecondMetric { value(prefix, name, &count, 1) {} }; -// 接口统计信息metric信息统计 +//Interface statistics information metric information statistics struct InterfaceMetric { - // 接口统计信息调用qps + //Call qps for interface statistics information PerSecondMetric qps; // error request persecond PerSecondMetric eps; // receive request persecond PerSecondMetric rps; - // 调用吞吐 + //Call throughput PerSecondMetric bps; - // 调用超时次数qps + //Call timeout count qps PerSecondMetric timeoutQps; - // 调用redirect次数qps + //Number of calls to redirect qps PerSecondMetric redirectQps; - // 调用latency + //Call latency bvar::LatencyRecorder latency; InterfaceMetric(const std::string& prefix, const std::string& name) @@ -102,36 +102,36 @@ struct DiscardMetric { bvar::Adder pending; }; -// 文件级别metric信息统计 +//File level metric 
information statistics struct FileMetric { const std::string prefix = "curve_client"; - // 当前metric归属于哪个文件 + //Which file does the current metric belong to std::string filename; - // 当前文件inflight io数量 + //Current file inflight io quantity bvar::Adder inflightRPCNum; - // 当前文件请求的最大请求字节数,这种统计方式可以很方便的看到最大值,分位值 + //The maximum number of request bytes for the current file request, which is a convenient statistical method to see the maximum and quantile values bvar::LatencyRecorder readSizeRecorder; bvar::LatencyRecorder writeSizeRecorder; bvar::LatencyRecorder discardSizeRecorder; - // libcurve最底层read rpc接口统计信息metric统计 + //Libcurve's lowest level read rpc interface statistics information metric statistics InterfaceMetric readRPC; - // libcurve最底层write rpc接口统计信息metric统计 + //Libcurve's lowest level write rpc interface statistics information metric statistics InterfaceMetric writeRPC; - // 用户读请求qps、eps、rps + //User Read Request QPS, EPS, RPS InterfaceMetric userRead; - // 用户写请求qps、eps、rps + //User write request QPS, EPS, RPS InterfaceMetric userWrite; // user's discard request InterfaceMetric userDiscard; - // get leader失败重试qps + //Get leader failed and retry qps PerSecondMetric getLeaderRetryQPS; - // 当前文件上的悬挂IO数量 + //The number of suspended IOs on the current file IOSuspendMetric suspendRPCMetric; DiscardMetric discardMetric; @@ -152,52 +152,52 @@ struct FileMetric { discardMetric(prefix + filename) {} }; -// 用于全局mds接口统计信息调用信息统计 +//Used for global mds interface statistics, call information statistics struct MDSClientMetric { std::string prefix; - // mds的地址信息 + //Address information of mds std::string metaserverAddr; bvar::PassiveStatus metaserverAddress; - // openfile接口统计信息 + //openfile interface statistics InterfaceMetric openFile; - // createFile接口统计信息 + //createFile interface statistics InterfaceMetric createFile; - // closeFile接口统计信息 + //closeFile interface statistics InterfaceMetric closeFile; - // getFileInfo接口统计信息 + //GetFileInfo interface statistics InterfaceMetric getFile; - // RefreshSession接口统计信息 + //RefreshSession Interface Statistics InterfaceMetric refreshSession; - // GetServerList接口统计信息 + //GetServerList interface statistics InterfaceMetric getServerList; - // GetOrAllocateSegment接口统计信息 + //GetOrAllocateSegment interface statistics InterfaceMetric getOrAllocateSegment; - // DeAllocateSegment接口统计信息 + //DeAllocateSegment Interface Statistics InterfaceMetric deAllocateSegment; - // RenameFile接口统计信息 + //RenameFile Interface Statistics InterfaceMetric renameFile; - // Extend接口统计信息 + //Extend Interface Statistics InterfaceMetric extendFile; - // DeleteFile接口统计信息 + //deleteFile interface statistics InterfaceMetric deleteFile; // RecoverFile interface metric InterfaceMetric recoverFile; - // changeowner接口统计信息 + //changeowner Interface Statistics InterfaceMetric changeOwner; - // listdir接口统计信息 + //Listdir interface statistics InterfaceMetric listDir; - // register接口统计信息 + //Register Interface Statistics InterfaceMetric registerClient; - // GetChunkServerID接口统计 + //GetChunkServerID interface statistics InterfaceMetric getChunkServerId; - // ListChunkServerInServer接口统计 + //ListChunkServerInServer Interface Statistics InterfaceMetric listChunkserverInServer; // IncreaseEpoch InterfaceMetric increaseEpoch; - // 切换mds server总次数 + //Total number of switching MDS server bvar::Adder mdsServerChangeTimes; explicit MDSClientMetric(const std::string& prefix_ = "") @@ -244,8 +244,8 @@ struct LatencyGuard { class MetricHelper { public: /** - * 统计getleader重试次数 - * @param: fm为当前文件的metric指针 + * Count the 
number of retries for getleader + * @param: fm is the metric pointer of the current file */ static void IncremGetLeaderRetryTime(FileMetric* fm) { if (fm != nullptr) { @@ -254,10 +254,10 @@ class MetricHelper { } /** - * 统计用户当前读写请求次数,用于qps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of read and write requests from users for QPS calculation + * @param: fm is the metric pointer of the current file + * @param: length is the current request size + * @param: read is whether the current operation is a read or write operation */ static void IncremUserQPSCount(FileMetric* fm, uint64_t length, @@ -285,9 +285,9 @@ class MetricHelper { } /** - * 统计用户当前读写请求失败次数,用于eps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of failed read/write requests by users for EPS calculation + * @param: fm is the metric pointer of the current file + * @param: read is whether the current operation is a read or write operation */ static void IncremUserEPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -307,13 +307,13 @@ class MetricHelper { } /** - * 统计用户当前接收到的读写请求次数,用于rps计算 - * rps: receive request persecond, 就是当前接口每秒接收到的请求数量 - * qps: query request persecond, 就是当前接口每秒处理的请求数量 - * eps: error request persecond, 就是当前接口每秒出错的请求数量 - * rps减去qps就是当前client端每秒钟等待的请求数量,这部分请求会持久占用当前一秒内的内存 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the number of read and write requests currently received by the user for RPS calculation + * rps: receive request persecond, which is the number of requests received by the current interface per second + * qps: query request persecond, which is the number of requests processed by the current interface per second + * eps: error request persecond, which is the number of requests that make errors per second on the current interface + * rps minus qps is the number of requests that the current client is waiting for per second, which will persistently occupy the current memory for one second + * @param: fm is the metric pointer of the current file + * @param: read is whether the current operation is a read or write operation */ static void IncremUserRPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -333,9 +333,9 @@ class MetricHelper { } /** - * 统计当前rpc失败次数,用于eps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of RPC failures for EPS calculation + * @param: fm is the metric pointer of the current file + * @param: read is whether the current operation is a read or write operation */ static void IncremFailRPCCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -353,9 +353,9 @@ class MetricHelper { } /** - * 统计用户当前读写请求超时次数,用于timeoutQps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Counts the number of times a user's current read/write request has timed out, used for timeoutQps calculation + * @param: fm is the metric pointer of the current file + * @param: read is whether the current operation is a read or write operation */ static void IncremTimeOutRPCCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -373,9 +373,9 @@ class MetricHelper { } /** - * 统计请求被redirect的次数 - * @param fileMetric 当前文件的metric指针 - * @param opType 请求类型 + * Count the number of times requests have been redirected + * @param fileMetric The metric pointer of the current file + * @param opType request type */ static void IncremRedirectRPCCount(FileMetric* fileMetric, OpType opType) { if (fileMetric) { @@ 
-393,10 +393,10 @@ class MetricHelper { } /** - * 统计读写RPC接口统计信息请求次数及带宽统计,用于qps及bps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + *Statistics of the number of requests and bandwidth for reading and writing RPC interfaces, used for QPS and bps calculations + * @param: fm is the metric pointer of the current file + * @param: length is the current request size + * @param: read is whether the current operation is a read or write operation */ static void IncremRPCQPSCount(FileMetric* fm, uint64_t length, @@ -418,10 +418,10 @@ class MetricHelper { } /** - * 统计读写RPC接口统计信息请求次数及带宽统计,用于rps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + *Statistics of the number of requests and bandwidth for reading and writing RPC interfaces, used for RPS calculations + * @param: fm is the metric pointer of the current file + * @param: length is the current request size + * @param: read is whether the current operation is a read or write operation */ static void IncremRPCRPSCount(FileMetric* fm, OpType type) { diff --git a/src/client/config_info.h b/src/client/config_info.h index 5c465386f4..b7d6aa98ab 100644 --- a/src/client/config_info.h +++ b/src/client/config_info.h @@ -31,9 +31,9 @@ namespace curve { namespace client { /** - * log的基本配置信息 - * @logLevel: 是log打印等级 - * @logPath: log打印位置 + * Basic configuration information of log + * @logLevel: It is the log printing level + * @logPath: Log printing location */ struct LogInfo { int logLevel = 2; @@ -41,8 +41,8 @@ struct LogInfo { }; /** - * in flight IO控制信息 - * @fileMaxInFlightRPCNum: 为一个文件中最大允许的inflight IO数量 + * in flight IO control information + * @fileMaxInFlightRPCNum: is the maximum allowed number of inflight IOs in a file */ struct InFlightIOCntlInfo { uint64_t fileMaxInFlightRPCNum = 2048; @@ -78,27 +78,27 @@ struct MetaServerOption { }; /** - * 租约基本配置 - * @mdsRefreshTimesPerLease: 一个租约内续约次数,client与mds之间通过租约保持心跳 - * 如果双方约定租约有效期为10s,那么client会在这10s内 - * 发送mdsRefreshTimesPerLease次心跳,如果连续失败, - * 那么client认为当前mds存在异常,会阻塞后续的IO,直到 - * 续约成功。 + * Basic configuration of lease + * @mdsRefreshTimesPerLease: The number of renewals within a lease, and the heartbeat between client and mds is maintained through the lease + * If both parties agree that the lease term is 10 seconds, then the client will be within these 10 seconds + * send mdsRefreshTimesPerLease heartbeats, if consecutive failures occur, + * So the client believes that there is an abnormality in the current mds, which will block subsequent IO until + * successfully renewed the contract. 
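As a rough illustration of the renewal cadence described above, the per-heartbeat interval is simply the lease length divided by mdsRefreshTimesPerLease; the helper below is a hypothetical sketch, not the client's actual scheduling code, and leaseTimeUs is an assumed parameter name:

    #include <cstdint>
    // With a 10s lease and mdsRefreshTimesPerLease = 5, a RefreshSession
    // heartbeat is sent roughly every 2 seconds.
    inline uint64_t RefreshIntervalUsSketch(uint64_t leaseTimeUs,
                                            uint32_t mdsRefreshTimesPerLease) {
        return leaseTimeUs / mdsRefreshTimesPerLease;
    }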
*/ struct LeaseOption { uint32_t mdsRefreshTimesPerLease = 5; }; /** - * rpc超时,判断是否unstable的参数 + * RPC timeout, parameter to determine if it is unstable * @maxStableChunkServerTimeoutTimes: - * 一个chunkserver连续超时请求的阈值, 超过之后会检查健康状态, - * 如果不健康,则标记为unstable + * The threshold for a chunkserver to continuously timeout requests, after which the health status will be checked, + * If not healthy, mark as unstable * @checkHealthTimeoutMS: - * 检查chunkserver是否健康的http请求超时时间 + * Check if chunkserver is healthy HTTP request timeout * @serverUnstableThreashold: - * 一个server上超过serverUnstableThreashold个chunkserver都标记为unstable, - * 整个server上的所有chunkserver都标记为unstable + * More than serverUnstableThreashold chunkservers on a server are marked as unstable, + * All chunkservers on the entire server are marked as unstable */ struct ChunkServerUnstableOption { uint32_t maxStableChunkServerTimeoutTimes = 64; @@ -107,42 +107,42 @@ struct ChunkServerUnstableOption { }; /** - * 发送失败的chunk request处理 + * Handling of failed chunk request: * @chunkserverOPMaxRetry: - * 最大重试次数,一个RPC下发到底层chunkserver,最大允许的失败 - * 次数,超限之后会向上返回用户。 + * Maximum retry count allowed for an RPC sent to the underlying chunk server. + * If exceeded, it will be propagated to the user. * @chunkserverOPRetryIntervalUS: - * 相隔多久再重试,一个rpc失败之后client会根据其返回 - * 状态决定是否需要睡眠一段时间再重试,目前除了 - * TIMEOUT、REDIRECTED,这两种返回值,其他返回值都是需要 - * 先睡眠一段时间再重试。 - * @chunkserverRPCTimeoutMS: 为每个rpc发送时,其rpc controller配置的超时时间 + * Time interval between retries. After a failed RPC, the client will sleep for a period + * determined by the RPC response status before retrying. Currently, + * except for TIMEOUT and REDIRECTED, all other response values + * require sleeping for some time before retrying. + * @chunkserverRPCTimeoutMS: Timeout configured for each RPC sent when creating its RPC controller. * @chunkserverMaxRPCTimeoutMS: - * 在底层chunkserver返回TIMEOUT时,说明当前请求在底层 - * 无法及时得到处理,原因可能是底层的排队任务太多,这时候如果 - * 以相同的rpc - * 超时时间再去发送请求,很有可能最后还是超时, - * 所以为了避免底层处理请求时,rpc在client一侧已经超时的这种 - * 状况,为rpc超时时间增加了指数退避逻辑,超时时间会逐渐增加, - * 最大不能超过该值。 + * When the underlying chunkserver returns TIMEOUT, it means the current request cannot be + * processed promptly, possibly due to a large number of queued tasks. In such cases, sending requests + * with the same RPC timeout again + * may still result in timeouts. + * To avoid this, exponential backoff + * logic is applied to increase the timeout gradually, but it cannot + * exceed this maximum value. * @chunkserverMaxRetrySleepIntervalUS: - * 在底层返回OVERLOAD时,表明当前chunkserver - * 压力过大,这时候睡眠时间会进行指数退避,睡眠时间会加长,这样 - * 能够保证client的请求不会将底层chunkserver打满,但是睡眠时间 - * 最长不能超过该值。 - * @chunkserverMaxStableTimeoutTimes: 一个chunkserver连续超时请求的阈值, - * 超过之后 会标记为unstable。因为一个chunkserver所在的server如果宕机 - * 那么发向该chunkserver的请求都会超时,如果同一个chunkserver - * 的rpc连续超时超过该阈值,那么client就认为这个chunkserver - * 所在的server可能宕机了,就将该server上的所有leader - * copyset 标记为unstable,促使其下次发送rpc前,先去getleader。 + * When the underlying chunk server returns OVERLOAD, indicating excessive pressure, + * the sleep interval is exponentially extended to ensure that client + * requests do not overwhelm the underlying chunk server. However, + * the maximum sleep time cannot exceed this value. + * @chunkserverMaxStableTimeoutTimes: + * Threshold for consecutive timeouts on an RPC from a chunk server. If exceeded, the chunk server is marked as unstable. This is because if a server + * where a chunk server resides crashes, requests sent to that chunk server will all time out. 
+ * If the same chunk server's RPCs consecutively timeout beyond this threshold, + * the client assumes that the server where it resides may have crashed + * and marks all leader copysets on that server as unstable, prompting a leader retrieval before sending any RPCs. * @chunkserverMinRetryTimesForceTimeoutBackoff: - * 当一个请求重试次数超过阈值时,还在重试 使其超时时间进行指数退避 + * When a request exceeds the retry threshold, the timeout is exponentially increased, forcing a backoff. * @chunkserverMaxRetryTimesBeforeConsiderSuspend: - * rpc重试超过这个次数后被认为是悬挂IO, - * 因为发往chunkserver底层的rpc重试次数非常大,如果一个rpc连续 - * 失败超过该阈值的时候,可以认为当前IO处于悬挂状态,通过metric - * 向上报警。 + * When the number of RPC retries exceeds this threshold, it is considered a suspended IO. + * Since RPC retries sent to the chunkserver can be very high, if an + * RPC fails consecutively beyond this threshold, it can be considered + * that the current IO is in a suspended state, and an alarm is raised through metrics reporting. */ struct FailureRequestOption { uint32_t chunkserverOPMaxRetry = 3; @@ -155,9 +155,9 @@ struct FailureRequestOption { }; /** - * 发送rpc给chunkserver的配置 - * @inflightOpt: 一个文件向chunkserver发送请求时的inflight 请求控制配置 - * @failRequestOpt: rpc发送失败之后,需要进行rpc重试的相关配置 + * Configuration for sending rpc to chunkserver + * @inflightOpt: Configuration of inflight request control when a file sends a request to chunkserver + * @failRequestOpt: After rpc sending fails, relevant configuration for rpc retry needs to be carried out */ struct IOSenderOption { InFlightIOCntlInfo inflightOpt; @@ -165,10 +165,10 @@ struct IOSenderOption { }; /** - * scheduler模块基本配置信息,schedule模块是用于分发用户请求,每个文件有自己的schedule - * 线程池,线程池中的线程各自配置一个队列 - * @scheduleQueueCapacity: schedule模块配置的队列深度 - * @scheduleThreadpoolSize: schedule模块线程池大小 + Basic Configuration Information for the Scheduler Module + * The scheduler module is used for distributing user requests. Each file has its own scheduler thread pool, and each thread in the pool is configured with its own queue. + * @scheduleQueueCapacity: The queue depth configured by the schedule module + * @scheduleThreadpoolSize: schedule module thread pool size */ struct RequestScheduleOption { uint32_t scheduleQueueCapacity = 1024; @@ -177,26 +177,26 @@ struct RequestScheduleOption { }; /** - * metaccache模块配置信息 + * MetaCache Module Configuration * @metacacheGetLeaderRetry: - * 获取leader重试次数,一个rpc发送到chunkserver之前需要先 - * 获取当前copyset的leader,如果metacache中没有这个信息, - * 就向copyset的peer发送getleader请求,如果getleader失败, - * 需要重试,最大重试次数为该值。 + * Number of retries to get the leader. Before an RPC is sent to the chunkserver, it needs to first + * obtain the leader for the current copyset. If this information is not available in the metacache, + * a getleader request is sent to a copyset's peers. If getleader fails, it needs to be retried, with a maximum + * retry count defined by this value. * @metacacheRPCRetryIntervalUS: - * 如上所述,如果getleader请求失败,会发起重试,但是并 - * 不会立即进行重试,而是选择先睡眠一段时间在重试。该值代表 - * 睡眠长度。 - * @metacacheGetLeaderRPCTimeOutMS: 发送getleader rpc请求的rpc - * controller最大超时时间 + * As mentioned above, if a getleader request fails, it will be retried, but not immediately. Instead, + * there will be a delay before the retry. + * This value represents the length of that delay. 
+ * @metacacheGetLeaderRPCTimeOutMS: The maximum timeout duration for the RPC + * controller when sending a 'getleader' RPC request * @metacacheGetLeaderBackupRequestMS: - * 因为一个copyset有三个或者更多的peer,getleader - * 会以backuprequest的方式向这些peer发送rpc,在brpc内部 - * 会串行发送,如果第一个请求超过一定时间还没返回,就直接向 - * 下一个peer发送请求,而不用等待上一次请求返回或超时,这个触发 - * backup request的时间就为该值。 - * @metacacheGetLeaderBackupRequestLbName: 为getleader backup rpc - * 选择底层服务节点的策略 + * Since a copyset has three or more peers, getleader requests are + * sent to these peers in a backuprequest manner. + * Internally, in brpc, these requests are sent serially. If the first request takes too long to return, the next + * request is sent to the next peer without waiting for the previous one to return or time out. The time at which + * backup requests are triggered is determined by this value. + * @metacacheGetLeaderBackupRequestLbName: Strategy for selecting the underlying service nodes + * for getleader backup RPCs. */ struct MetaCacheOption { uint32_t metacacheGetLeaderRetry = 3; @@ -209,21 +209,21 @@ struct MetaCacheOption { }; /** - * IO 拆分模块配置信息 + * IO Split Module Configuration * @fileIOSplitMaxSizeKB: - * 用户下发IO大小client没有限制,但是client会将用户的IO进行拆分, - * 发向同一个chunkserver的请求锁携带的数据大小不能超过该值。 + * The size of user-issued IOs is not restricted by the client. However, the client will split the user's IOs, + * and the data size carried by requests sent to the same chunkserver cannot exceed this value. */ struct IOSplitOption { uint64_t fileIOSplitMaxSizeKB = 64; }; /** - * 线程隔离任务队列配置信息 - * 线程隔离主要是为了上层做异步接口调用时,直接将其调用任务推到线程池中而不是让其阻塞到放入 - * 分发队列线程池。 - * @isolationTaskQueueCapacity: 隔离线程池的队列深度 - * @isolationTaskThreadPoolSize: 隔离线程池容量 + * Configuration information for thread-isolated task queues. + * Thread isolation is primarily used to push asynchronous interface calls + * directly into the thread pool instead of blocking them until they are placed in the dispatch queue thread pool. + * @isolationTaskQueueCapacity: The queue depth of the isolation thread pool. + * @isolationTaskThreadPoolSize: The capacity of the isolation thread pool. */ struct TaskThreadOption { uint64_t isolationTaskQueueCapacity = 500000; @@ -251,7 +251,7 @@ struct ThrottleOption { }; /** - * IOOption存储了当前io 操作所需要的所有配置信息 + * IOOption stores all the configuration information required for the current IO operation */ struct IOOption { IOSplitOption ioSplitOpt; @@ -265,11 +265,11 @@ struct IOOption { }; /** - * client一侧常规的共同的配置信息 - * @mdsRegisterToMDS: 是否向mds注册client信息,因为client需要通过dummy - * server导出 metric信息,为了配合普罗米修斯的自动服务发现机制,会将其监听的 - * ip和端口信息发送给mds。 - * @turnOffHealthCheck: 是否关闭健康检查 + * Common client-side configuration options: + * @mdsRegisterToMDS: Whether to register client information with the MDS. Since the client + * needs to export metric information through a dummy server to support + * Prometheus's automatic service discovery mechanism, it sends its listening IP and port information to the MDS. + * @turnOffHealthCheck: Whether to disable health checks. 
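For reference, backup requests of the kind described for MetaCacheOption above are typically enabled through brpc channel options; the following is a hedged sketch of how the two getleader values might be wired in, assuming brpc::ChannelOptions (which exposes timeout_ms and backup_request_ms) rather than showing curve's actual channel setup:

    #include <brpc/channel.h>
    #include <cstdint>
    // Hypothetical wiring of the getleader timeout and backup-request window.
    void ApplyGetLeaderOptionsSketch(uint32_t getLeaderRpcTimeoutMS,
                                     uint32_t getLeaderBackupRequestMS,
                                     brpc::ChannelOptions* opts) {
        opts->timeout_ms = getLeaderRpcTimeoutMS;            // overall RPC timeout
        opts->backup_request_ms = getLeaderBackupRequestMS;  // when to fire the backup request to the next peer
    }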
*/ struct CommonConfigOpt { bool mdsRegisterToMDS = false; @@ -277,7 +277,7 @@ struct CommonConfigOpt { }; /** - * ClientConfigOption是外围快照系统需要设置的配置信息 + * ClientConfigOption is the configuration information that needs to be set for the peripheral snapshot system */ struct ClientConfigOption { LogInfo loginfo; @@ -307,7 +307,7 @@ struct ChunkServerClientRetryOptions { }; /** - * FileServiceOption是QEMU侧总体配置信息 + * FileServiceOption is the overall configuration information on the QEMU side */ struct FileServiceOption { LogInfo loginfo; diff --git a/src/client/copyset_client.cpp b/src/client/copyset_client.cpp index 964929d18f..4f65a93b4b 100644 --- a/src/client/copyset_client.cpp +++ b/src/client/copyset_client.cpp @@ -64,13 +64,13 @@ int CopysetClient::Init(MetaCache *metaCache, } bool CopysetClient::FetchLeader(LogicPoolID lpid, CopysetID cpid, ChunkServerID* leaderid, butil::EndPoint* leaderaddr) { - // 1. 先去当前metacache中拉取leader信息 + // 1. First, pull the leader information from the current metacache if (0 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, false, fileMetric_)) { return true; } - // 2. 如果metacache中leader信息拉取失败,就发送RPC请求获取新leader信息 + // 2. If the pull of leader information in the metacache fails, send an RPC request to obtain new leader information if (-1 == metaCache_->GetLeader(lpid, cpid, leaderid, leaderaddr, true, fileMetric_)) { LOG(WARNING) << "Get leader address form cache failed, but " @@ -82,11 +82,11 @@ bool CopysetClient::FetchLeader(LogicPoolID lpid, CopysetID cpid, return true; } -// 因为这里的CopysetClient::ReadChunk(会在两个逻辑里调用 -// 1. 从request scheduler下发的新的请求 -// 2. clientclosure再重试逻辑里调用copyset client重试 -// 这两种状况都会调用该接口,因为对于重试的RPC有可能需要重新push到队列中 -// 非重试的RPC如果重新push到队列中会导致死锁。 +// Because the CopysetClient::ReadChunk (will be called in two logics) here +// 1. New requests issued from the request scheduler +// 2. Calling copyset client to retry in the clientclosure retry logic +// Both of these situations will call the interface, as retrying RPCs may require re pushing to the queue +// If non retrying RPC is pushed back into the queue, it will cause a deadlock. int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, off_t offset, size_t length, const RequestSourceInfo& sourceInfo, @@ -94,15 +94,15 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, RequestClosure* reqclosure = static_cast(done); brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of the retry RPC here + // Will not block the brpc thread as it is common to all files. Avoid affecting other files + // Because the session renewal failure may only be a network issue, IO is actually still possible after the renewal is successful + // Normal distribution, so failure cannot be directly returned upwards. Hang on at the bottom and continue sending after the renewal is successful + // 2. 
exitFlag_=true during file closing, retrying rpc will directly return to the user through closure + // After the return call, doneguard will call the run of the closure, releasing the inflight rpc count, + // Then the closure is returned to the user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" @@ -113,7 +113,7 @@ int CopysetClient::ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, << ", len = " << length; return 0; } else { - // session过期之后需要重新push到队列 + // After the session expires, it needs to be re pushed to the queue LOG(WARNING) << "session not valid, read rpc ReSchedule!"; doneGuard.release(); reqclosure->ReleaseInflightRPCToken(); @@ -146,15 +146,15 @@ int CopysetClient::WriteChunk(const ChunkIDInfo& idinfo, brpc::ClosureGuard doneGuard(done); - // session过期情况下重试有两种场景: - // 1. 正常重试过程,非文件关闭状态,这时候RPC直接重新push到scheduler队列头部 - // 重试调用是在brpc的线程里,所以这里不会卡住重试的RPC,这样 - // 不会阻塞brpc线程,因为brpc线程是所有文件公用的。避免影响其他文件 - // 因为session续约失败可能只是网络问题,等待续约成功之后IO其实还可以 - // 正常下发,所以不能直接向上返回失败,在底层hang住,等续约成功之后继续发送 - // 2. 在关闭文件过程中exitFlag_=true,重试rpc会直接向上通过closure返回给用户 - // return调用之后doneguard会调用closure的run,会释放inflight rpc计数, - // 然后closure向上返回给用户。 + // There are two scenarios for retrying when a session expires: + // 1. During the normal retry process, if the file is not in a closed state, RPC will directly re push to the scheduler queue header + // The retry call is in the brpc thread, so there will be no blocking of the retry RPC here + // Will not block the brpc thread as it is common to all files. Avoid affecting other files + // Because the session renewal failure may only be a network issue, IO is actually still possible after the renewal is successful + // Normal distribution, so failure cannot be directly returned upwards. Hang on at the bottom and continue sending after the renewal is successful + // 2. exitFlag_=true during file closing, retrying rpc will directly return to the user through closure + // After the return call, doneguard will call the run of the closure, releasing the inflight rpc count, + // Then the closure is returned to the user upwards. if (sessionNotValid_ == true) { if (exitFlag_) { LOG(WARNING) << " return directly for session not valid at exit!" diff --git a/src/client/copyset_client.h b/src/client/copyset_client.h index 3dc1fc66f7..c313af1b25 100644 --- a/src/client/copyset_client.h +++ b/src/client/copyset_client.h @@ -43,12 +43,12 @@ namespace client { using curve::common::Uncopyable; using ::google::protobuf::Closure; -// TODO(tongguangxun) :后续除了read、write的接口也需要调整重试逻辑 +// TODO(tongguangxun): In addition to the read and write interfaces, the retry logic needs to be adjusted in the future class MetaCache; class RequestScheduler; /** - * 负责管理 ChunkServer 的链接,向上层提供访问 - * 指定 copyset 的 chunk 的 read/write 等接口 + * Responsible for managing connections to ChunkServers and providing upper-layer access + * to read/write interfaces for specific chunks within a copyset. 
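The two-step leader lookup shown in CopysetClient::FetchLeader above (metacache first, then an RPC refresh on a miss) can be condensed into a sketch; the standalone function below is illustrative only and omits the warning log and file metric argument of the real method:

    // Condensed form of the two GetLeader calls shown above.
    bool FetchLeaderSketch(MetaCache* cache, LogicPoolID lpid, CopysetID cpid,
                           ChunkServerID* leaderId, butil::EndPoint* leaderAddr) {
        // 1. Try the local metacache first (no refresh).
        if (0 == cache->GetLeader(lpid, cpid, leaderId, leaderAddr, false, nullptr)) {
            return true;
        }
        // 2. On a miss, refresh by asking the copyset peers over RPC.
        return 0 == cache->GetLeader(lpid, cpid, leaderId, leaderAddr, true, nullptr);
    }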
*/ class CopysetClient { public: @@ -73,20 +73,20 @@ class CopysetClient { RequestScheduler* scheduler = nullptr, FileMetric* fileMetic = nullptr); /** - * 返回依赖的Meta Cache + * Return dependent Meta Cache */ MetaCache* GetMetaCache() { return metaCache_; } /** - * 读Chunk - * @param idinfo为chunk相关的id信息 - * @param sn:文件版本号 - * @param offset:读的偏移 - * @param length:读的长度 - * @param souceInfo chunk克隆源信息 - * @param done:上一层异步回调的closure + * Reading Chunk + * @param idinfo is the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param sourceInfo chunk Clone source information + * @param done: closure of asynchronous callback on the previous layer */ int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, @@ -96,16 +96,16 @@ class CopysetClient { google::protobuf::Closure *done); /** - * 写Chunk - * @param idinfo为chunk相关的id信息 + * Write Chunk + * @param idinfo is the ID information related to chunk * @param fileId: file id * @param epoch: file epoch - * @param sn:文件版本号 - * @param writeData:要写入的数据 - *@param offset:写的偏移 - * @param length:写的长度 - * @param sourceInfo chunk克隆源信息 - * @param done:上一层异步回调的closure + * @param sn: File version number + * @param writeData: The data to be written + *@param offset: write offset + * @param length: The length written + * @param sourceInfo: chunk Clone source information + * @param done: closure of asynchronous callback on the previous layer */ int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, @@ -118,12 +118,12 @@ class CopysetClient { Closure *done); /** - * 读Chunk快照文件 - * @param idinfo为chunk相关的id信息 - * @param sn:文件版本号 - * @param offset:读的偏移 - * @param length:读的长度 - * @param done:上一层异步回调的closure + *Reading Chunk snapshot files + * @param idinfo: the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param done: closure of asynchronous callback on the previous layer */ int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, @@ -132,33 +132,33 @@ class CopysetClient { Closure *done); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param idinfo为chunk相关的id信息 - * @param correctedSn:需要修正的版本号 - * @param done:上一层异步回调的closure + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correctedSn of the chunk + * @param idinfo is the ID information related to chunk + * @param correctedSn: Version number that needs to be corrected + * @param done: closure of asynchronous callback on the previous layer */ int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, uint64_t correctedSn, Closure *done); /** - * 获取chunk文件的信息 - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure + * Obtain information about chunk files + * @param idinfo: the ID information related to chunk + * @param done: closure of asynchronous callback on the previous layer */ int GetChunkInfo(const ChunkIDInfo& idinfo, Closure *done); /** - * @brief lazy 创建clone chunk - * @param idinfo为chunk相关的id信息 - * @param:location 数据源的url - * @param:sn chunk的序列号 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param:chunkSize chunk的大小 - * @param done:上一层异步回调的closure - * @return 错误码 + * @brief lazy Create clone chunk + * @param idinfo: the ID information related to chunk + * @param location: URL of the data source + * @param sn: chunk's serial number + * @param correntSn: used to modify the chunk when creating 
CloneChunk + * @param chunkSize: Chunk size + * @param done: closure of asynchronous callback on the previous layer + * @return error code */ int CreateCloneChunk(const ChunkIDInfo& idinfo, const std::string &location, @@ -168,12 +168,12 @@ class CopysetClient { Closure *done); /** - * @brief 实际恢复chunk数据 - * @param idinfo为chunk相关的id信息 - * @param:offset 偏移 - * @param:len 长度 - * @param done:上一层异步回调的closure - * @return 错误码 + * @brief Actual recovery chunk data + * @param idinfo is the ID information related to chunk + * @param offset: offset + * @param len: length + * @param done: closure of asynchronous callback on the previous layer + * @return error code */ int RecoverChunk(const ChunkIDInfo& idinfo, uint64_t offset, @@ -181,7 +181,7 @@ class CopysetClient { Closure *done); /** - * @brief 如果csId对应的RequestSender不健康,就进行重置 + * @brief If the RequestSender corresponding to csId is not healthy, reset it * @param csId chunkserver id */ void ResetSenderIfNotHealth(const ChunkServerID& csId) { @@ -189,24 +189,24 @@ class CopysetClient { } /** - * session过期,需要将重试RPC停住 + * session expired, retry RPC needs to be stopped */ void StartRecycleRetryRPC() { sessionNotValid_ = true; } /** - * session恢复通知不再回收重试的RPC + * session recovery notification no longer recycles retried RPCs */ void ResumeRPCRetry() { sessionNotValid_ = false; } /** - * 在文件关闭的时候接收上层关闭通知, 根据session有效状态 - * 置位exitFlag, 如果sessio无效状态下再有rpc超时返回,这 - * 些RPC会直接错误返回,如果session正常,则将继续正常下发 - * RPC,直到重试次数结束或者成功返回 + * Receive upper-layer closure notification when the file is closed. + * Set the exitFlag based on the session's validity status. If there are RPC timeouts + * under an invalid session state, these RPCs will return errors directly. If the session + * is valid, RPCs will continue to be issued until the retry limit is reached or they return successfully. */ void ResetExitFlag() { if (sessionNotValid_) { @@ -218,43 +218,42 @@ class CopysetClient { friend class WriteChunkClosure; friend class ReadChunkClosure; - // 拉取新的leader信息 + // Pull new leader information bool FetchLeader(LogicPoolID lpid, CopysetID cpid, ChunkServerID* leaderid, butil::EndPoint* leaderaddr); /** - * 执行发送rpc task,并进行错误重试 - * @param[in]: idinfo为当前rpc task的id信息 - * @param[in]: task为本次要执行的rpc task - * @param[in]: done是本次rpc 任务的异步回调 - * @return: 成功返回0, 否则-1 + * Execute the send rpc task and retry with an error + * @param[in]: idinfo is the ID information of the current rpc task + * @param[in]: task is the rpc task executed this time + * @param[in]: done is the asynchronous callback for this RPC task + * @return: Successfully returns 0, otherwise -1 */ int DoRPCTask(const ChunkIDInfo& idinfo, std::function)> task, Closure *done); private: - // 元数据缓存 + // Metadata cache MetaCache *metaCache_; - // 所有ChunkServer的链接管理者 + // Link managers for all ChunkServers RequestSenderManager *senderManager_; - // 配置 + // Configuration IOSenderOption iosenderopt_; - // session是否有效,如果session无效那么需要将重试的RPC停住 - // RPC停住通过将这个rpc重新push到request scheduler队列,这样不会 - // 阻塞brpc内部的线程,防止一个文件的操作影响到其他文件 + // Check if the session is valid. If the session is invalid, it's necessary to pause the retry RPCs by re-pushing this RPC into the request scheduler queue. + // This ensures that it doesn't block the internal threads of BRPC and prevents operations on one file from affecting other files. 
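As a usage illustration of the three session switches declared above, the caller shown here is hypothetical; in curve these calls are driven by the lease logic on the client side:

    // On lease renewal failure: park retried RPCs instead of letting them fail.
    copysetClient->StartRecycleRetryRPC();
    // ... later, once RefreshSession succeeds again ...
    copysetClient->ResumeRPCRetry();
    // When the file is being closed, decide how outstanding retries should exit.
    copysetClient->ResetExitFlag();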
bool sessionNotValid_; - // request 调度器,在session过期的时候重新将RPC push到调度队列 + // request scheduler to push RPC back to the scheduling queue when the session expires RequestScheduler* scheduler_; - // 当前copyset client对应的文件metric + // The file metric corresponding to the current copyset client FileMetric* fileMetric_; - // 是否在停止状态中,如果是在关闭过程中且session失效,需要将rpc直接返回不下发 + // Is it in a stopped state? If it is during the shutdown process and the session fails, it is necessary to directly return rpc without issuing it bool exitFlag_; }; diff --git a/src/client/file_instance.cpp b/src/client/file_instance.cpp index 63836653de..67191393ee 100644 --- a/src/client/file_instance.cpp +++ b/src/client/file_instance.cpp @@ -190,14 +190,14 @@ int FileInstance::AioDiscard(CurveAioContext *aioctx) { return -1; } -// 两种场景会造成在Open的时候返回LIBCURVE_ERROR::FILE_OCCUPIED -// 1. 强制重启qemu不会调用close逻辑,然后启动的时候原来的文件sessio还没过期. -// 导致再次去发起open的时候,返回被占用,这种情况可以通过load sessionmap -// 拿到已有的session,再去执行refresh。 -// 2. 由于网络原因,导致open rpc超时,然后再去重试的时候就会返回FILE_OCCUPIED -// 这时候当前还没有成功打开,所以还没有存储该session信息,所以无法通过refresh -// 再去打开,所以这时候需要获取mds一侧session lease时长,然后在client这一侧 -// 等待一段时间再去Open,如果依然失败,就向上层返回失败。 +// Two scenarios can lead to returning LIBCURVE_ERROR::FILE_OCCUPIED when opening: +// 1. Forcibly restarting QEMU does not trigger the close logic, and when starting, the original session file has not expired yet. +// This causes a return of "occupied" +// when attempting to open it again. This situation can be resolved by loading the session map, obtaining the existing session, and then performing a refresh. +// 2. Due to network issues, the open RPC times out, and when retrying, it returns FILE_OCCUPIED. +// At this point, the file hasn't been successfully opened yet, so the session information isn't stored, +// and it's impossible to open it through refresh. In this case, you need to obtain the session lease duration +// on the MDS side, then wait for a period on the client side before attempting to Open again. If it still fails, return a failure to the upper layer. int FileInstance::Open(std::string* sessionId) { LeaseSession_t lease; int ret = LIBCURVE_ERROR::FAILED; diff --git a/src/client/file_instance.h b/src/client/file_instance.h index 432a3402e4..34c19d3757 100644 --- a/src/client/file_instance.h +++ b/src/client/file_instance.h @@ -42,14 +42,14 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { ~FileInstance() = default; /** - * 初始化 - * @param: filename文件名用于初始化iomanager的metric信息 - * @param: mdsclient为全局的mds client - * @param: userinfo为user信息 - * @param: fileservicopt fileclient的配置选项 - * @param: clientMetric为client端要统计的metric信息 - * @param: readonly是否以只读方式打开 - * @return: 成功返回true、否则返回false + * Initialize + * @param: filename The filename used to initialize the iomanager's metric information. + * @param: mdsclient The global mds client. + * @param: userinfo User information. + * @param: fileservicopt The configuration options for the fileclient. + * @param: clientMetric Metric information to be collected on the client side. + * @param: readonly Whether to open in read-only mode. + * @return: Returns true on success, otherwise returns false. 
 */ bool Initialize(const std::string& filename, const std::shared_ptr& mdsclient, @@ -58,39 +58,39 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { const FileServiceOption& fileservicopt, bool readonly = false); /** - * 打开文件 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Open the file + * @return: Returns LIBCURVE_ERROR::OK on success, otherwise LIBCURVE_ERROR::FAILED */ int Open(std::string* sessionId = nullptr); /** - * 同步模式读 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回读取真实长度,-1为失败 + * Synchronous mode read + * @param: buf The buffer to read into + * @param: offset The offset within the file + * @param: length The length to be read + * @return: Returns the number of bytes actually read on success, -1 on failure */ int Read(char* buf, off_t offset, size_t length); /** - * 同步模式写 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回写入真实长度,-1为失败 + * Synchronous mode write + * @param: buf The buffer to be written + * @param: offset The offset within the file + * @param: length The length to be written + * @return: Returns the number of bytes actually written on success, -1 on failure */ int Write(const char* buf, off_t offset, size_t length); /** - * 异步模式读 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode read + * @param: aioctx The I/O context for asynchronous read/write, which holds basic I/O information * @param: dataType type of user buffer - * @return: 0为成功,小于0为失败 + * @return: 0 on success, less than 0 on failure */ int AioRead(CurveAioContext* aioctx, UserDataType dataType); /** - * 异步模式写 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode write + * @param: aioctx The I/O context for asynchronous read/write, which holds basic I/O information * @param: dataType type of user buffer - * @return: 0为成功,小于0为失败 + * @return: 0 on success, less than 0 on failure */ int AioWrite(CurveAioContext* aioctx, UserDataType dataType); @@ -118,7 +118,7 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { } /** - * 获取lease, 测试代码使用 + * Get the lease executor, for test code use */ LeaseExecutor* GetLeaseExecutor() const { return leaseExecutor_.get(); @@ -132,9 +132,9 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { } /** - * @brief 获取当前instance对应的文件信息 + * @brief Get the file information corresponding to the current instance * - * @return 当前instance对应文件的信息 + * @return The information of the file corresponding to the current instance */ FInfo GetCurrentFileInfo() const { return finfo_; @@ -159,22 +159,22 @@ class CURVE_CACHELINE_ALIGNMENT FileInstance { void StopLease(); private: - // 保存当前file的文件信息 + // File information of the current file FInfo finfo_; - // 当前FileInstance的初始化配置信息 + // Initialization configuration of the current FileInstance FileServiceOption fileopt_; - // MDSClient是FileInstance与mds通信的唯一出口 + // MDSClient is the sole gateway for FileInstance to communicate with the MDS std::shared_ptr mdsclient_; - // 每个文件都持有与MDS通信的lease,LeaseExecutor是续约执行者 + // Each file holds a lease for communication with the MDS; the LeaseExecutor performs the renewals std::unique_ptr leaseExecutor_; - // IOManager4File用于管理所有向chunkserver端发送的IO + // IOManager4File manages all IO sent to the chunkserver side IOManager4File iomanager4file_; - // 是否为只读方式 + // Whether the file is opened in read-only mode bool readonly_ = false; // offset and length must align with `blocksize_` diff --git a/src/client/inflight_controller.h b/src/client/inflight_controller.h index 
5c59f4edce..2fe633f955 100644 --- a/src/client/inflight_controller.h +++ b/src/client/inflight_controller.h @@ -40,8 +40,8 @@ class InflightControl { } /** - * @brief 调用该接口等待inflight全部回来,这段期间是hang的, - * 在close文件时调用 + * @brief calls the interface to wait for all inflight returns, which is a period of hang, + * Called when closing a file */ void WaitInflightAllComeBack() { LOG(INFO) << "wait inflight to complete, count = " << curInflightIONum_; @@ -53,7 +53,7 @@ class InflightControl { } /** - * @brief 调用该接口等待inflight回来,这段期间是hang的 + * @brief calls the interface to wait for inflight to return, which is during the hang period */ void WaitInflightComeBack() { if (curInflightIONum_.load(std::memory_order_acquire) >= @@ -67,14 +67,14 @@ class InflightControl { } /** - * @brief 递增inflight num + * @brief increment inflight num */ void IncremInflightNum() { curInflightIONum_.fetch_add(1, std::memory_order_release); } /** - * @brief 递减inflight num + * @brief decreasing inflight num */ void DecremInflightNum() { std::lock_guard lk(inflightComeBackmtx_); @@ -90,15 +90,15 @@ class InflightControl { } /** - * WaitInflightComeBack会检查当前未返回的io数量是否超过我们限制的最大未返回inflight数量 - * 但是真正的inflight数量与上层并发调用的线程数有关。 - * 假设我们设置的maxinflight=100,上层有三个线程在同时调用GetInflightToken, - * 如果这个时候inflight数量为99,那么并发状况下这3个线程在WaitInflightComeBack - * 都会通过然后向下并发执行IncremInflightNum,这个时候真正的inflight为102, - * 下一个下发的时候需要等到inflight数量小于100才能继续,也就是等至少3个IO回来才能继续 - * 下发。这个误差是可以接受的,他与scheduler一侧并发度有关,误差有上限。 - * 如果想要精确控制inflight数量,就需要在接口处加锁,让原本可以并发的逻辑变成了 - * 串行,这样得不偿失。因此我们这里选择容忍一定误差范围。 + * WaitInflightComeBack checks if the current number of pending IOs exceeds our maximum allowed inflight limit. + * However, the actual inflight count is influenced by concurrent calls from upper-layer threads. + * Suppose we set maxinflight to 100, and there are three upper-layer threads simultaneously calling GetInflightToken. + * If, at this moment, the inflight count is 99, then in a concurrent scenario, all three threads in WaitInflightComeBack + * will pass and proceed to concurrently execute IncremInflightNum. Consequently, the actual inflight count becomes 102. + * The next dispatch operation will need to wait until the inflight count is less than 100 to proceed, which means it + * needs at least 3 IOs to return before proceeding. This margin of error is acceptable and is related to the concurrency level on the scheduler side, with a defined upper limit. + * If precise control over the inflight count is required, it would necessitate adding locks at the interface level, + * converting originally concurrent logic into serial, which would not be a cost-effective solution. Therefore, we choose to tolerate a certain margin of error in this scenario. */ void GetInflightToken() { WaitInflightComeBack(); diff --git a/src/client/io_condition_varaiable.h b/src/client/io_condition_varaiable.h index a220168db3..2cfce3560c 100644 --- a/src/client/io_condition_varaiable.h +++ b/src/client/io_condition_varaiable.h @@ -28,7 +28,7 @@ namespace curve { namespace client { -// IOConditionVariable是用户同步IO场景下IO等待条件变量 +// IOConditionVariable is the IO waiting condition variable in the user synchronous IO scenario class IOConditionVariable { public: IOConditionVariable() : retCode_(-1), done_(false), mtx_(), cv_() {} @@ -36,9 +36,9 @@ class IOConditionVariable { ~IOConditionVariable() = default; /** - * 条件变量唤醒函数,因为底层的RPC request是异步的,所以用户下发同步IO的时候需要 - * 在发送读写请求的时候暂停等待IO返回。 - * @param: retcode是当前IO的返回值 + * Condition variable wakeup function. 
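The bounded overshoot described above can be reproduced with a small standalone sketch (this is not the real InflightControl class, only the same unlocked check-then-increment pattern; the real code blocks on a condition variable when the count is at the limit): with a limit of 100 and the counter at 99, several threads can all pass the check before any of them increments, so the count briefly reaches 100 + (threads - 1).

    #include <atomic>
    #include <chrono>
    #include <iostream>
    #include <thread>
    #include <vector>

    constexpr uint64_t kMaxInflight = 100;
    std::atomic<uint64_t> inflight{99};

    // Check and increment are two separate atomic operations, as in GetInflightToken().
    void GetToken() {
        if (inflight.load(std::memory_order_acquire) < kMaxInflight) {
            // Widen the race window so all threads observe 99 before any increment lands.
            std::this_thread::sleep_for(std::chrono::milliseconds(1));
            inflight.fetch_add(1, std::memory_order_release);
        }
    }

    int main() {
        std::vector<std::thread> threads;
        for (int i = 0; i < 3; ++i) threads.emplace_back(GetToken);
        for (auto& t : threads) t.join();
        // Typically prints 102: three threads passed the "< 100" check concurrently.
        std::cout << "inflight = " << inflight.load() << "\n";
    }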
Since the underlying RPC requests are asynchronous, + * when users initiate synchronous IO, they need to pause and wait for the IO to return while sending read/write requests. + * @param: retcode is the return value of the current IO. */ void Complete(int retcode) { std::unique_lock lk(mtx_); @@ -48,7 +48,7 @@ class IOConditionVariable { } /** - * 是用户IO需要等待时候调用的函数,这个函数会在Complete被调用的时候返回 + * This is a function called when user IO needs to wait, and this function will return when Complete is called */ int Wait() { std::unique_lock lk(mtx_); @@ -58,16 +58,16 @@ class IOConditionVariable { } private: - // 当前IO的返回值 + // The return value of the current IO int retCode_; - // 当前IO是否完成 + // Is the current IO completed bool done_; - // 条件变量使用的锁 + // Locks used by conditional variables std::mutex mtx_; - // 条件变量用于等待 + // Condition variable used for waiting std::condition_variable cv_; }; diff --git a/src/client/io_tracker.cpp b/src/client/io_tracker.cpp index 85d6dae911..d7cc044499 100644 --- a/src/client/io_tracker.cpp +++ b/src/client/io_tracker.cpp @@ -533,13 +533,13 @@ void IOTracker::Done() { DestoryRequestList(); - // scc_和aioctx都为空的时候肯定是个同步调用 + // When both scc_ and aioctx are empty, it is definitely a synchronous call. if (scc_ == nullptr && aioctx_ == nullptr) { iocv_.Complete(ToReturnCode()); return; } - // 异步函数调用,在此处发起回调 + // Asynchronous function call, where a callback is initiated if (aioctx_ != nullptr) { aioctx_->ret = ToReturnCode(); aioctx_->cb(aioctx_); @@ -548,7 +548,7 @@ void IOTracker::Done() { scc_->Run(); } - // 回收当前io tracker + // Recycle the current io tracker iomanager_->HandleAsyncIOResponse(this); } @@ -570,7 +570,7 @@ void IOTracker::ChunkServerErr2LibcurveErr(CHUNK_OP_STATUS errcode, case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: *errout = LIBCURVE_ERROR::OK; break; - // chunk或者copyset对于用户来说是透明的,所以直接返回错误 + // Chunks or copysets are transparent to users, so they directly return errors case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: *errout = LIBCURVE_ERROR::NOTEXIST; diff --git a/src/client/io_tracker.h b/src/client/io_tracker.h index 6369410ae3..9633781f50 100644 --- a/src/client/io_tracker.h +++ b/src/client/io_tracker.h @@ -49,9 +49,9 @@ class IOManager; class FileSegment; class DiscardTaskManager; -// IOTracker用于跟踪一个用户IO,因为一个用户IO可能会跨chunkserver, -// 因此在真正下发的时候会被拆分成多个小IO并发的向下发送,因此我们需要 -// 跟踪发送的request的执行情况。 +// IOTracker is used to track a user's IO, as a user's IO may cross chunkservers, +// Therefore, when it is actually distributed, it will be split into multiple small IOs and sent down concurrently. Therefore, we need to +// Track the execution status of the sent request. 
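The Complete()/Wait() pairing described above is a one-shot condition-variable handoff between the asynchronous RPC callback and the blocked user thread. A compact standalone sketch of the same pattern (not the actual IOConditionVariable class, which is reused across IOs) is shown below.

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    // Minimal version of the Complete()/Wait() handoff used for synchronous IO.
    class OneShotResult {
     public:
        void Complete(int retcode) {
            std::unique_lock<std::mutex> lk(mtx_);
            retCode_ = retcode;
            done_ = true;
            cv_.notify_one();                       // wake the user thread blocked in Wait()
        }
        int Wait() {
            std::unique_lock<std::mutex> lk(mtx_);
            cv_.wait(lk, [this] { return done_; }); // block until Complete() has run
            return retCode_;
        }
     private:
        int retCode_ = -1;
        bool done_ = false;
        std::mutex mtx_;
        std::condition_variable cv_;
    };

    int main() {
        OneShotResult result;
        // Stand-in for the asynchronous RPC callback delivering the IO result.
        std::thread rpcCallback([&] { result.Complete(4096); });
        std::cout << "bytes transferred: " << result.Wait() << "\n";
        rpcCallback.join();
    }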
class CURVE_CACHELINE_ALIGNMENT IOTracker { friend class Splitor; @@ -65,23 +65,23 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { ~IOTracker() = default; /** - * @brief StartRead同步读 - * @param buf 读缓冲区 - * @param offset 读偏移 - * @param length 读长度 - * @param mdsclient 透传给splitor,与mds通信 - * @param fileInfo 当前io对应文件的基本信息 + * @brief StartRead Sync Read + * @param buf read buffer + * @param offset read offset + * @param length Read length + * @param mdsclient transparently transmits to the splitter for communication with mds + * @param fileInfo Basic information of the file corresponding to the current io */ void StartRead(void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, Throttle* throttle = nullptr); /** - * @brief StartWrite同步写 - * @param buf 写缓冲区 - * @param offset 写偏移 - * @param length 写长度 - * @param mdsclient 透传给splitor,与mds通信 - * @param fileInfo 当前io对应文件的基本信息 + * @brief StartWrite Sync Write + * @param buf write buffer + * @param offset write offset + * @param length Write length + * @param mdsclient transparently transmits to the splitter for communication with mds + * @param fileInfo Basic information of the file corresponding to the current io */ void StartWrite(const void* buf, off_t offset, size_t length, MDSClient* mdsclient, const FInfo_t* fileInfo, @@ -116,15 +116,15 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { DiscardTaskManager* taskManager); /** - * chunk相关接口是提供给snapshot使用的,上层的snapshot和file - * 接口是分开的,在IOTracker这里会将其统一,这样对下层来说不用 - * 感知上层的接口类别。 - * @param:chunkidinfo 目标chunk - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 + * The chunk-related interfaces are intended for use by snapshots. The upper-level snapshot + * and file interfaces are separate. However, in the IOTracker, they are unified so that the + * lower levels do not need to be aware of the upper-level interface category. 
+ * @param: chunkidinfo The target chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is the read buffer + * @param: scc is the asynchronous callback */ void ReadSnapChunk(const ChunkIDInfo &cinfo, uint64_t seq, @@ -133,29 +133,29 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { char *buf, SnapCloneClosure* scc); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param:chunkidinfo 目标chunk - * @param: seq是需要修正的版本号 + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correctedSn of the chunk + * @param: chunkidinfo is the target chunk + * @param: seq is the version number that needs to be corrected */ void DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &cinfo, uint64_t correctedSeq); /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param:chunkidinfo 目标chunk - * @param: chunkInfo是快照的详细信息 + * Obtain the version information of the chunk, where chunkInfo is the output parameter + * @param: chunkidinfo target chunk + * @param: chunkInfo is the detailed information of the snapshot */ void GetChunkInfo(const ChunkIDInfo &cinfo, ChunkInfoDetail *chunkInfo); /** - * @brief lazy 创建clone chunk - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param:chunkSize chunk的大小 - * @param: scc是异步回调 + * @brief lazy Create clone chunk + * @param: location is the URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: correntSn used to modify the chunk when CreateCloneChunk + * @param: chunkSize chunk size + * @param: scc is an asynchronous callback */ void CreateCloneChunk(const std::string& location, const ChunkIDInfo& chunkidinfo, uint64_t sn, @@ -163,43 +163,43 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { SnapCloneClosure* scc); /** - * @brief 实际恢复chunk数据 - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 - * @param:chunkSize chunk的大小 - * @param: scc是异步回调 + * @brief Actual recovery chunk data + * @param: chunkidinfo chunkidinfo + * @param: offset offset + * @param: len length + * @param: chunkSize Chunk size + * @param: scc is an asynchronous callback */ void RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset, uint64_t len, SnapCloneClosure* scc); /** - * Wait用于同步接口等待,因为用户下来的IO被client内部线程接管之后 - * 调用就可以向上返回了,但是用户的同步IO语意是要等到结果返回才能向上 - * 返回的,因此这里的Wait会让用户线程等待。 - * @return: 返回读写信息,异步IO的时候返回0或-1.0代表成功,-1代表失败 - * 同步IO返回length或-1,length代表真实读写长度,-1代表读写失败 + * Wait is used for synchronous interface waiting. When the user's IO is taken over + * by client internal threads, the call can return to the upper layer. However, the user's synchronous IO semantics require waiting for the result to return before + * returning to the upper layer, so Wait here will make the user thread wait. + * @return: Returns read/write information. For asynchronous IO, it returns 0 or -1. 0 means success, -1 means failure. + * For synchronous IO, it returns the length or -1. 'length' represents the actual read/write length, and -1 represents read/write failure. 
*/ int Wait(); /** - * 每个request都要有自己的OP类型,这里提供接口可以在io拆分的时候获取类型 + * Each request must have its own OP type, and an interface is provided here to obtain the type during IO splitting */ OpType Optype() {return type_;} - // 设置操作类型,测试使用 + // Set operation type, test usage void SetOpType(OpType type) { type_ = type; } /** - * 因为client的IO都是异步发送的,且一个IO被拆分成多个Request,因此在异步 - * IO返回后就应该告诉IOTracker当前request已经返回,这样tracker可以处理 - * 返回的request。 - * @param: 待处理的异步request + * Because client IOs are all sent asynchronously, and a single IO is split into multiple Requests, + * after asynchronous IO returns, it should inform the IOTracker that the current request has returned. + * This way, the tracker can handle the returned request. + * @param: The asynchronous request to be processed. */ void HandleResponse(RequestContext* reqctx); /** - * 获取当前tracker id信息 + * Obtain the current tracker ID information */ uint64_t GetID() const { return id_; @@ -232,38 +232,38 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { void ReleaseAllSegmentLocks(); /** - * 当IO返回的时候调用done,由done负责向上返回 + * When IO returns, call done, which is responsible for returning upwards */ void Done(); /** - * 在io拆分或者,io分发失败的时候需要调用,设置返回状态,并向上返回 + * When IO splitting or IO distribution fails, it needs to be called, set the return status, and return upwards */ void ReturnOnFail(); /** - * 用户下来的大IO会被拆分成多个子IO,这里在返回之前将子IO资源回收 + * The user's incoming large IO will be split into multiple sub IOs, and the sub IO resources will be reclaimed before returning here */ void DestoryRequestList(); /** - * 填充request context common字段 - * @param: idinfo为chunk的id信息 - * @param: req为待填充的request context + * Fill in the request context common field + * @param: IDInfo is the ID information of the chunk + * @param: req is the request context to be filled in */ void FillCommonFields(ChunkIDInfo idinfo, RequestContext* req); /** - * chunkserver errcode转化为libcurve client的errode - * @param: errcode为chunkserver侧的errode - * @param[out]: errout为libcurve自己的errode + * Convert chunkserver errcode to libcurve client errode + * @param: errcode is the error code on the chunkserver side + * @param[out]: errout is libcurve's own errode */ void ChunkServerErr2LibcurveErr(curve::chunkserver::CHUNK_OP_STATUS errcode, LIBCURVE_ERROR* errout); /** - * 获取一个初始化后的RequestContext - * return: 如果分配失败或者初始化失败,返回nullptr - * 反之,返回一个指针 + * Obtain an initialized RequestContext + * @return: If allocation or initialization fails, return nullptr + * On the contrary, return a pointer */ RequestContext* GetInitedRequestContext() const; @@ -296,10 +296,10 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { } private: - // io 类型 + // IO type OpType type_; - // 当前IO的数据内容,data是读写数据的buffer + // The current IO data content, where data is the buffer for reading and writing data off_t offset_; uint64_t length_; @@ -315,48 +315,48 @@ class CURVE_CACHELINE_ALIGNMENT IOTracker { // save read data std::vector readDatas_; - // 当用户下发的是同步IO的时候,其需要在上层进行等待,因为client的 - // IO发送流程全部是异步的,因此这里需要用条件变量等待,待异步IO返回 - // 之后才将这个等待的条件变量唤醒,然后向上返回。 + // When a user sends synchronous IO, they need to wait in the upper layer because the client's + // IO sending process is all asynchronous, so here we need to use a conditional variable to wait for asynchronous IO to return + // Afterwards, the waiting condition variable is awakened and then returned upwards. 
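The split-and-count flow described for HandleResponse() can be sketched roughly as follows (the real IOTracker also aggregates per-request error codes and recycles request resources; this sketch only shows the reqcount_-style countdown that triggers Done() once the last sub-request returns, and all names are illustrative):

    #include <atomic>
    #include <iostream>

    struct SubRequest { int retcode = 0; };

    class TrackerSketch {
     public:
        explicit TrackerSketch(int subRequests) : remaining_(subRequests) {}

        // Called once per sub-request when its asynchronous RPC returns.
        void HandleResponse(const SubRequest& req) {
            if (req.retcode != 0) failed_.store(true, std::memory_order_relaxed);
            // The last returning sub-request completes the whole user IO.
            if (remaining_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                Done();
            }
        }

     private:
        void Done() {
            std::cout << (failed_.load() ? "user IO failed\n" : "user IO done\n");
        }
        std::atomic<int> remaining_;
        std::atomic<bool> failed_{false};
    };

    int main() {
        TrackerSketch tracker(3);      // one user IO split into 3 sub-requests
        tracker.HandleResponse({0});
        tracker.HandleResponse({0});
        tracker.HandleResponse({0});   // third return triggers Done()
    }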
IOConditionVariable iocv_; - // 异步IO的context,在异步IO返回时,通过调用aioctx - // 的异步回调进行返回。 + // The context of asynchronous IO; when the asynchronous IO returns, the result is + // delivered by invoking the asynchronous callback of aioctx. CurveAioContext* aioctx_; - // 当前IO的errorcode + // The errorcode of the current IO LIBCURVE_ERROR errcode_; - // 当前IO被拆分成reqcount_个小IO + // The current IO is split into reqcount_ small IOs std::atomic reqcount_; - // 大IO被拆分成多个request,这些request放在reqlist中国保存 + // A large IO is split into multiple requests, which are stored in reqlist std::vector reqlist_; // store segment indices that can be discarded std::unordered_set discardSegments_; - // metacache为当前fileinstance的元数据信息 + // metacache holds the metadata information of the current fileinstance MetaCache* mc_; - // scheduler用来将用户线程与client自己的线程切分 - // 大IO被切分之后,将切分的reqlist传给scheduler向下发送 + // The scheduler is used to separate user threads from the client's own threads + // After a large IO is split, the split reqlist is handed to the scheduler to be sent downwards RequestScheduler* scheduler_; - // 对于异步IO,Tracker需要向上层通知当前IO已经处理结束 - // iomanager可以将该tracker释放 + // For asynchronous IO, the tracker needs to notify the upper layer that the current IO has finished + // so that the iomanager can release this tracker IOManager* iomanager_; - // 发起时间 + // Time at which the IO was issued uint64_t opStartTimePoint_; - // client端的metric统计信息 + // Metric statistics on the client side FileMetric* fileMetric_; - // 当前tracker的id + // The ID of the current tracker uint64_t id_; - // 快照克隆系统异步调用回调指针 + // Asynchronous callback pointer for the snapshot clone system SnapCloneClosure* scc_; bool disableStripe_; @@ -365,7 +365,7 @@ // so store corresponding segment lock and release after operations finished std::vector segmentLocks_; - // id生成器 + // ID generator static std::atomic tracekerID_; static DiscardOption discardOption_; diff --git a/src/client/iomanager.h b/src/client/iomanager.h index e985b1527f..d4eb3f59a3 100644 --- a/src/client/iomanager.h +++ b/src/client/iomanager.h @@ -40,34 +40,34 @@ class IOManager { virtual ~IOManager() = default; /** - * @brief 获取当前iomanager的ID信息 + * @brief Get the ID information of the current iomanager */ virtual IOManagerID ID() const { return id_; } /** - * @brief 获取rpc发送令牌 + * @brief Get the RPC send token */ virtual void GetInflightRpcToken() { return; } /** - * @brief 释放rpc发送令牌 + * @brief Release the RPC send token */ virtual void ReleaseInflightRpcToken() { return; } /** - * @brief 处理异步返回的response - * @param: iotracker是当前reponse的归属 + * @brief Handle the asynchronously returned response + * @param: iotracker The owner of the current response */ virtual void HandleAsyncIOResponse(IOTracker* iotracker) = 0; protected: - // iomanager id目的是为了让底层RPC知道自己归属于哪个iomanager + // The purpose of the iomanager id is to let the underlying RPC know which iomanager it belongs to IOManagerID id_; private: diff --git a/src/client/iomanager4chunk.h b/src/client/iomanager4chunk.h index f9cedeca02..a2515e36bf 100644 --- a/src/client/iomanager4chunk.h +++ b/src/client/iomanager4chunk.h @@ -44,14 +44,14 @@ class IOManager4Chunk : public IOManager { bool Initialize(IOOption ioOpt, MDSClient* mdsclient); /** - * 读取seq版本号的快照数据 - * @param:chunkidinfo 目标chunk - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 - * @return:成功返回真实读取长度,失败为-1 + * Read snapshot data of seq version number + * @param: chunkidinfo target chunk + * @param: seq is the snapshot 
version number + * @param: offset is the offset within the snapshot + * @param: len is the length to be read + * @param: buf is a read buffer + * @param: scc is an asynchronous callback + * @return: Successfully returned the true read length, failed with -1 */ int ReadSnapChunk(const ChunkIDInfo &chunkidinfo, uint64_t seq, @@ -60,35 +60,35 @@ class IOManager4Chunk : public IOManager { char *buf, SnapCloneClosure* scc); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param:chunkidinfo 目标chunk - * @param: correctedSeq是需要修正的版本号 + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correctedSn of the chunk + * @param: chunkidinfo target chunk + * @param: correctedSeq is the version number that needs to be corrected */ int DeleteSnapChunkOrCorrectSn(const ChunkIDInfo &chunkidinfo, uint64_t correctedSeq); /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param:chunkidinfo 目标chunk - * @param: chunkInfo是快照的详细信息 + * Obtain the version information of the chunk, where chunkInfo is the output parameter + * @param: chunkidinfo target chunk + * @param: chunkInfo is the detailed information of the snapshot */ int GetChunkInfo(const ChunkIDInfo &chunkidinfo, ChunkInfoDetail *chunkInfo); /** - * @brief lazy 创建clone chunk + * @brief lazy Create clone chunk * @detail - * - location的格式定义为 A@B的形式。 - * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs + * - The format of the location is defined as A@B. + * - If the source data is on S3, the location format is uri@s3, where uri is the actual address of the chunk object. + * - If the source data is on CurveFS, the location format is /filename/chunkindex@cs. * - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param: scc是异步回调 - * @return 成功返回0, 否则-1 + * @param: location URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: chunkSize Chunk size + * @param: correntSn used to modify the chunk when creating CloneChunk + * @param: scc is an asynchronous callback + * @return successfully returns 0, otherwise -1 */ int CreateCloneChunk(const std::string &location, const ChunkIDInfo &chunkidinfo, @@ -98,47 +98,47 @@ class IOManager4Chunk : public IOManager { SnapCloneClosure* scc); /** - * @brief 实际恢复chunk数据 + * @brief Actual recovery chunk data * @param chunkidinfo chunkidinfo - * @param offset 偏移 - * @param len 长度 - * @param scc 异步回调 - * @return 成功返回0, 否则-1 + * @param offset offset + * @param len length + * @param scc asynchronous callback + * @return successfully returns 0, otherwise -1 */ int RecoverChunk(const ChunkIDInfo& chunkIdInfo, uint64_t offset, uint64_t len, SnapCloneClosure* scc); /** - * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO - * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker, - * HandleAsyncIOResponse负责释放IOTracker - * @param: 是异步返回的io + * Because the bottom layer of the curve client is asynchronous IO, each IO is assigned an IOtracker to track IO + * After this IO is completed, the underlying layer needs to inform the current IO manager to release this IOTracker, + * HandleAsyncIOResponse is responsible for releasing the IOTracker + * @param: It is an io returned asynchronously */ void HandleAsyncIOResponse(IOTracker* iotracker) override; /** - * 析构,回收资源 + * Deconstruct and recycle resources */ void UnInitialize(); /** 
- * 获取metacache,测试代码使用 + * Obtain Metacache, test code usage */ MetaCache* GetMetaCache() {return &mc_;} /** - * 设置scahuler,测试代码使用 + * Set up scahuler to test code usage */ void SetRequestScheduler(RequestScheduler* scheduler) { scheduler_ = scheduler; } private: - // 每个IOManager都有其IO配置,保存在iooption里 + // Each IOManager has its IO configuration, which is saved in the iooption IOOption ioopt_; - // metacache存储当前snapshot client元数据信息 + // metacache stores the current snapshot client metadata information MetaCache mc_; - // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放 + // The IO is finally distributed by the schedule module to the chunkserver end, and the scheduler is created and released by the IOManager RequestScheduler* scheduler_; }; diff --git a/src/client/iomanager4file.cpp b/src/client/iomanager4file.cpp index b6f1b09527..fbd501cd30 100644 --- a/src/client/iomanager4file.cpp +++ b/src/client/iomanager4file.cpp @@ -55,8 +55,8 @@ bool IOManager4File::Initialize(const std::string& filename, return false; } - // IO Manager中不控制inflight IO数量,所以传入UINT64_MAX - // 但是IO Manager需要控制所有inflight IO在关闭的时候都被回收掉 + // The IO Manager does not control the number of inflight IOs, so UINT64_MAX is passed. + // However, the IO Manager needs to ensure that all inflight IOs are reclaimed upon shutdown. inflightCntl_.SetMaxInflightNum(UINT64_MAX); scheduler_ = new (std::nothrow) RequestScheduler(); @@ -128,8 +128,8 @@ void IOManager4File::UnInitialize() { discardTaskManager_->Stop(); { - // 这个锁保证设置exit_和delete scheduler_是原子的 - // 这样保证在scheduler_被析构的时候lease线程不会使用scheduler_ + // This lock ensures that setting exit_ and deleting scheduler_ are atomic. + // This ensures that the lease thread won't use scheduler_ when it is being destructed. std::unique_lock lk(exitMtx_); exit_ = true; diff --git a/src/client/iomanager4file.h b/src/client/iomanager4file.h index eaecc8497f..221c7dd7e9 100644 --- a/src/client/iomanager4file.h +++ b/src/client/iomanager4file.h @@ -57,11 +57,11 @@ class IOManager4File : public IOManager { ~IOManager4File() = default; /** - * 初始化函数 - * @param: filename为当前iomanager服务的文件名 - * @param: ioopt为当前iomanager的配置信息 - * @param: mdsclient向下透传给metacache - * @return: 成功true,失败false + * Initialization function + * @param: filename is the file name of the current iomanager service + * @param: ioopt is the configuration information of the current iomanager + * @param: mdsclient penetrates downwards to Metacache + * @return: Success true, failure false */ bool Initialize(const std::string& filename, const IOOption& ioOpt, @@ -73,39 +73,39 @@ class IOManager4File : public IOManager { void UnInitialize(); /** - * 同步模式读 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @return: 成功返回读取真实长度,-1为失败 + * Synchronous mode reading + * @param: buf is the current buffer to be read + * @param: offset is the offset in file + * @parma: length is the length to be read + * @param: mdsclient transparently transmits to the underlying layer and communicates with mds when necessary + * @return: Successfully returned reading the true length, -1 indicates failure */ int Read(char* buf, off_t offset, size_t length, MDSClient* mdsclient); /** - * 同步模式写 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的便宜 - * @param:length为待读取的长度 - * @return: 成功返回写入真实长度,-1为失败 + * Synchronous mode write + * @param: mdsclient transparently transmits to the underlying layer and communicates with mds when necessary + * @param: buf is the 
current buffer to be written + * @param: offset is the offset within the file + * @param: length is the length to be read + * @return: Success returns the true length of the write, -1 indicates failure */ int Write(const char* buf, off_t offset, size_t length, MDSClient* mdsclient); /** - * 异步模式读 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode read + * @param: mdsclient transparently transmits to the underlying layer and communicates with mds when necessary + * @param: aioctx is an asynchronous read/write IO context that stores basic IO information * @param dataType type of aioctx->buf - * @return: 0为成功,小于0为失败 + * @return: 0 indicates success, less than 0 indicates failure */ int AioRead(CurveAioContext* aioctx, MDSClient* mdsclient, UserDataType dataType); /** - * 异步模式写 - * @param: mdsclient透传给底层,在必要的时候与mds通信 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode write + * @param: mdsclient transparently transmits to the underlying layer and communicates with mds when necessary + * @param: aioctx is an asynchronous read/write IO context that stores basic IO information * @param dataType type of aioctx->buf - * @return: 0为成功,小于0为失败 + * @return: 0 indicates success, less than 0 indicates failure */ int AioWrite(CurveAioContext* aioctx, MDSClient* mdsclient, UserDataType dataType); @@ -128,52 +128,52 @@ class IOManager4File : public IOManager { int AioDiscard(CurveAioContext* aioctx, MDSClient* mdsclient); /** - * @brief 获取rpc发送令牌 + * @brief Get rpc send token */ void GetInflightRpcToken() override; /** - * @brief 释放rpc发送令牌 + * @brief Release RPC Send Token */ void ReleaseInflightRpcToken() override; /** - * 获取metacache,测试代码使用 + * Obtain Metacache, test code usage */ MetaCache* GetMetaCache() { return &mc_; } /** - * 设置scheduler,测试代码使用 + * Set the scheduler to test the code using */ void SetRequestScheduler(RequestScheduler* scheduler) { scheduler_ = scheduler; } /** - * 获取metric信息,测试代码使用 + * Obtain metric information and test code usage */ FileMetric* GetMetric() { return fileMetric_; } /** - * 重新设置io配置信息,测试使用 + * Reset IO configuration information for testing use */ void SetIOOpt(const IOOption& opt) { ioopt_ = opt; } /** - * 测试使用,获取request scheduler + * Test usage, obtain request scheduler */ RequestScheduler* GetScheduler() { return scheduler_; } /** - * lease excutor在检查到版本更新的时候,需要通知iomanager更新文件版本信息 - * @param: fi为当前需要更新的文件信息 + * When the lease excutor detects a version update, it needs to notify the iomanager to update the file version information + * @param: fi is the current file information that needs to be updated */ void UpdateFileInfo(const FInfo_t& fi); @@ -190,14 +190,14 @@ class IOManager4File : public IOManager { } /** - * 返回文件最新版本号 + * Return the latest version number of the file */ uint64_t GetLatestFileSn() const { return mc_.GetLatestFileSn(); } /** - * 更新文件最新版本号 + * Update the latest version number of the file */ void SetLatestFileSn(uint64_t newSn) { mc_.SetLatestFileSn(newSn); @@ -220,26 +220,26 @@ class IOManager4File : public IOManager { friend class LeaseExecutor; friend class FlightIOGuard; /** - * lease相关接口,当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO - * 将新下发的IO全部失败返回 + * lease related interface, when LeaseExecutor contract renewal fails, calls LeaseTimeoutDisableIO + * Failed to return all newly issued IOs */ void LeaseTimeoutBlockIO(); /** - * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO + * When the lease is successfully renewed, the LeaseExecutor calls the interface to restore IO */ void ResumeIO(); /** - 
* 当lesaeexcutor发现版本变更,调用该接口开始等待inflight回来,这段期间IO是hang的 + * When the lesaeexcutor detects a version change, it calls the interface and waits for inflight to return. During this period, IO is hanging */ void BlockIO(); /** - * 因为curve client底层都是异步IO,每个IO会分配一个IOtracker跟踪IO - * 当这个IO做完之后,底层需要告知当前io manager来释放这个IOTracker, - * HandleAsyncIOResponse负责释放IOTracker - * @param: iotracker是返回的异步io + * Because the bottom layer of the curve client is asynchronous IO, each IO is assigned an IOtracker to track IO + * After this IO is completed, the underlying layer needs to inform the current IO manager to release this IOTracker, + * HandleAsyncIOResponse is responsible for releasing the IOTracker + * @param: iotracker is an asynchronous io returned */ void HandleAsyncIOResponse(IOTracker* iotracker) override; @@ -261,42 +261,42 @@ class IOManager4File : public IOManager { bool IsNeedDiscard(size_t len) const; private: - // 每个IOManager都有其IO配置,保存在iooption里 + // Each IOManager has its IO configuration, which is saved in the iooption IOOption ioopt_; - // metacache存储当前文件的所有元数据信息 + // metacache stores all metadata information for the current file MetaCache mc_; - // IO最后由schedule模块向chunkserver端分发,scheduler由IOManager创建和释放 + // The IO is finally distributed by the schedule module to the chunkserver end, and the scheduler is created and released by the IOManager RequestScheduler* scheduler_; - // client端metric统计信息 + // Metric statistics on the client side FileMetric* fileMetric_; - // task thread pool为了将qemu线程与curve线程隔离 + // The task thread pool is used to isolate the QEMU thread from the curve thread curve::common::TaskThreadPool taskPool_; - // inflight IO控制 + // inflight IO control InflightControl inflightCntl_; - // inflight rpc控制 + // inflight rpc control InflightControl inflightRpcCntl_; std::unique_ptr throttle_; - // 是否退出 + // Exit or not bool exit_; - // lease续约线程与qemu一侧线程调用是并发的 - // qemu在调用close的时候会关闭iomanager及其对应 - // 资源。lease续约线程在续约成功或失败的时候会通知iomanager的 - // scheduler线程现在需要block IO或者resume IO,所以 - // 如果在lease续约线程需要通知iomanager的时候,这时候 - // 如果iomanager的资源scheduler已经被释放了,就会 - // 导致crash,所以需要对这个资源加一把锁,在退出的时候 - // 不会有并发的情况,保证在资源被析构的时候lease续约 - // 线程不会再用到这些资源. + // The lease renewal thread and the QEMU-side thread are concurrent. + // When QEMU calls close, it closes the iomanager and its corresponding resources. + // The lease renewal thread notifies the iomanager's scheduler thread when renewal succeeds or fails, + // indicating whether it needs to block or resume IO. Therefore, + // if the lease renewal thread needs to notify the iomanager at this point, + // and if the iomanager's scheduler resources have already been released, + // it may lead to a crash. So, it's necessary to add a lock to protect this resource, + // ensuring that there is no concurrency when exiting. This ensures that + // the lease renewal thread won't use these resources when they are being destructed. 
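A condensed sketch of the locking pattern this comment describes (the entry-point names `NotifyLeaseFailed` and `Close` are illustrative; in the real code the same idea lives in IOManager4File::UnInitialize and the lease callbacks): the close path and the lease-renewal notification take the same mutex, and the notification re-checks the exit flag before touching the scheduler, so a notification arriving during shutdown cannot use freed resources.

    #include <iostream>
    #include <mutex>

    struct Scheduler { void BlockIO() { std::cout << "IO blocked\n"; } };

    class ManagerSketch {
     public:
        ~ManagerSketch() { Close(); }

        // Called from the lease-renewal thread when renewal fails.
        void NotifyLeaseFailed() {
            std::unique_lock<std::mutex> lk(exitMtx_);
            if (exit_ || scheduler_ == nullptr) return;  // already closing: do nothing
            scheduler_->BlockIO();
        }

        // Called from the QEMU-side close path.
        void Close() {
            std::unique_lock<std::mutex> lk(exitMtx_);
            exit_ = true;                 // set the flag and free the scheduler atomically
            delete scheduler_;
            scheduler_ = nullptr;
        }

     private:
        std::mutex exitMtx_;
        bool exit_ = false;
        Scheduler* scheduler_ = new Scheduler();
    };

    int main() {
        ManagerSketch m;
        m.NotifyLeaseFailed();   // safe: scheduler still alive
        m.Close();
        m.NotifyLeaseFailed();   // safe: exit_ short-circuits, scheduler not touched
    }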
std::mutex exitMtx_; // enable/disable stripe for read/write of stripe file diff --git a/src/client/lease_executor.cpp b/src/client/lease_executor.cpp index c8db8ddd30..d2022fbcf6 100644 --- a/src/client/lease_executor.cpp +++ b/src/client/lease_executor.cpp @@ -190,7 +190,7 @@ void LeaseExecutor::ResetRefreshSessionTask() { return; } - // 等待前一个任务退出 + // Wait for the previous task to exit task_->Stop(); task_->WaitTaskExit(); diff --git a/src/client/lease_executor.h b/src/client/lease_executor.h index 2236dc9982..dd2721e5b9 100644 --- a/src/client/lease_executor.h +++ b/src/client/lease_executor.h @@ -41,9 +41,9 @@ namespace client { class RefreshSessionTask; /** - * lease refresh结果,session如果不存在就不需要再续约 - * 如果session存在但是lease续约失败,继续续约 - * 续约成功了FInfo_t中才会有对应的文件信息 + * Result of a lease refresh. If the session does not exist, there is no need to renew it. + * If the session exists but the lease renewal failed, keep renewing. + * Only when the renewal succeeds does FInfo_t carry the corresponding file information */ struct LeaseRefreshResult { enum class Status { @@ -62,19 +62,19 @@ class LeaseExecutorBase { }; /** - * 每个vdisk对应的fileinstance都会与mds保持心跳 - * 心跳通过LeaseExecutor实现,LeaseExecutor定期 - * 去mds续约,同时将mds端当前file最新的版本信息带回来 - * 然后检查版本信息是否变更,如果变更就需要通知iomanager - * 更新版本。如果续约失败,就需要将用户新发下来的io直接错误返回 + * The fileinstance corresponding to each vdisk keeps a heartbeat with the mds. + * The heartbeat is implemented by the LeaseExecutor, which periodically + * renews the lease with the MDS and brings back the latest version information of the current file on the MDS side, + * then checks whether the version information has changed; if it has, the iomanager needs to be notified to + * update the version. If the renewal fails, newly issued user IO must be returned directly with an error */ class LeaseExecutor : public LeaseExecutorBase { public: /** - * 构造函数 - * @param: leaseopt为当前lease续约的option配置 - * @param: mdsclient是与mds续约的client - * @param: iomanager会在续约失败或者版本变更的时候进行io调度 + * Constructor + * @param: leaseopt is the option configuration for the current lease renewal + * @param: mdsclient is the client used to renew the lease with the mds + * @param: iomanager schedules IO when the renewal fails or the version changes */ LeaseExecutor(const LeaseOption& leaseOpt, const UserInfo& userinfo, MDSClient* mdscllent, IOManager4File* iomanager); @@ -82,26 +82,26 @@ class LeaseExecutor : public LeaseExecutorBase { ~LeaseExecutor(); /** - * LeaseExecutor需要finfo保存filename - * LeaseSession_t是当前leaeexcutor的执行配置 - * @param: fi为当前需要续约的文件版本信息 - * @param: lease为续约的lease信息 - * @return: 成功返回true,否则返回false + * LeaseExecutor needs finfo to save the filename + * LeaseSession_t is the execution configuration of the current lease executor + * @param: fi is the version information of the file that needs renewal + * @param: lease is the lease information for the renewal + * @return: Returns true on success, otherwise false */ bool Start(const FInfo_t& fi, const LeaseSession_t& lease); /** - * 停止续约 + * Stop renewal */ void Stop(); /** - * 当前lease如果续约失败则通知iomanagerdisable io + * If renewal of the current lease fails, notify the iomanager to disable IO */ bool LeaseValid(); /** - * 测试使用,主动失效增加刷新失败 + * For test use: actively invalidate the lease by adding refresh failures */ void InvalidLease() { for (uint32_t i = 0; i <= leaseoption_.mdsRefreshTimesPerLease; i++) { @@ -110,20 +110,20 @@ } /** - * @brief 续约任务执行者 - * @return 是否继续执行refresh session任务 + * @brief Renewal task executor + * @return Whether to continue 
executing the refresh session task */ bool RefreshLease() override; /** - * @brief 测试使用,重置refresh session task + * @brief For test use, reset the refresh session task */ void ResetRefreshSessionTask(); private: /** - * 一个lease期间会续约rfreshTimesPerLease次,每次续约失败就递增 - * 当连续续约rfreshTimesPerLease次失败的时候,则disable IO + * During one lease period the lease is refreshed rfreshTimesPerLease times; the counter is incremented on every failed renewal + * When rfreshTimesPerLease consecutive renewals have failed, IO is disabled */ void IncremRefreshFailed(); @@ -135,38 +135,38 @@ void CheckNeedUpdateFileInfo(const FInfo& fileInfo); private: - // 与mds进行lease续约的文件名 + // File name for lease renewal with the mds std::string fullFileName_; - // 用于续约的client + // Client used for renewal MDSClient* mdsclient_; - // 用于发起refression的user信息 + // User information used to initiate the refresh session UserInfo_t userinfo_; - // IO管理者,当文件需要更新版本信息或者disable io的时候调用其接口 + // IO manager; its interface is called when the file needs to update version information or disable IO IOManager4File* iomanager_; - // 当前lease执行的配置信息 + // Configuration information for the current lease execution LeaseOption leaseoption_; - // mds端传过来的lease信息,包含当前文件的lease时长,及sessionid + // Lease information returned by the MDS, including the lease duration of the current file and the session id LeaseSession_t leasesession_; - // 记录当前lease是否可用 + // Records whether the current lease is available std::atomic isleaseAvaliable_; - // 记录当前连续续约失败的次数 + // Records the current number of consecutive renewal failures std::atomic failedrefreshcount_; - // refresh session定时任务,会间隔固定时间执行一次 + // Refresh-session scheduled task, executed once per fixed interval std::unique_ptr task_; }; -// RefreshSessin定期任务 -// 利用brpc::PeriodicTaskManager进行管理 -// 定时器触发时调用OnTriggeringTask,根据返回值决定是否继续定时触发 -// 如果不再继续触发,调用OnDestroyingTask进行清理操作 +// RefreshSession periodic task +// Managed by brpc::PeriodicTaskManager +// OnTriggeringTask is called when the timer fires; its return value decides whether to keep triggering periodically +// If it is no longer triggered, OnDestroyingTask is called for cleanup class RefreshSessionTask : public brpc::PeriodicTask { public: using Task = std::function; @@ -193,10 +193,10 @@ class RefreshSessionTask : public brpc::PeriodicTask { virtual ~RefreshSessionTask() = default; /** - * @brief 定时器超时后执行当前函数 - * @param next_abstime 任务下次执行的绝对时间 - * @return true 继续定期执行当前任务 - * false 停止执行当前任务 + * @brief Execute the current function after the timer expires + * @param next_abstime Absolute time of the task's next execution + * @return true Continue executing the current task periodically + * false Stop executing the current task */ bool OnTriggeringTask(timespec* next_abstime) override { std::lock_guard lk(stopMtx_); @@ -209,7 +209,7 @@ } /** - * @brief 停止再次执行当前任务 + * @brief Stop executing the current task again */ void Stop() { std::lock_guard lk(stopMtx_); @@ -217,7 +217,7 @@ } /** - * @brief 任务停止后调用 + * @brief Called after the task stops */ void OnDestroyingTask() override { std::unique_lock lk(terminatedMtx_); @@ -226,7 +226,7 @@ } /** - * @brief 等待任务退出 + * @brief Wait for the task to exit */ void WaitTaskExit() { std::unique_lock lk(terminatedMtx_); @@ -236,8 +236,8 @@ } /** - * @brief 获取refresh session时间间隔(us) - * @return 
refresh session任务时间间隔(us) + * @brief Get refresh session time interval (us) + * @return refresh session task time interval (us) */ uint64_t RefreshIntervalUs() const { return refreshIntervalUs_; diff --git a/src/client/libcurve_file.cpp b/src/client/libcurve_file.cpp index 2a61bb88ea..44884f673c 100644 --- a/src/client/libcurve_file.cpp +++ b/src/client/libcurve_file.cpp @@ -355,7 +355,7 @@ int FileClient::Create2(const CreateFileContext& context) { } int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -370,7 +370,7 @@ int FileClient::Read(int fd, char *buf, off_t offset, size_t len) { } int FileClient::Write(int fd, const char *buf, off_t offset, size_t len) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (len == 0) { return -LIBCURVE_ERROR::OK; } @@ -397,7 +397,7 @@ int FileClient::Discard(int fd, off_t offset, size_t length) { int FileClient::AioRead(int fd, CurveAioContext *aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -417,7 +417,7 @@ int FileClient::AioRead(int fd, CurveAioContext *aioctx, int FileClient::AioWrite(int fd, CurveAioContext *aioctx, UserDataType dataType) { - // 长度为0,直接返回,不做任何操作 + // Length is 0, returns directly without any operation if (aioctx->length == 0) { return -LIBCURVE_ERROR::OK; } @@ -753,7 +753,7 @@ bool FileClient::StartDummyServer() { return false; } - // 获取本地ip + // Obtain local IP std::string ip; if (!common::NetCommon::GetLocalIP(&ip)) { LOG(ERROR) << "Get local ip failed!"; @@ -771,7 +771,7 @@ bool FileClient::StartDummyServer() { } // namespace curve -// 全局初始化与反初始化 +// Global initialization and deinitialization int GlobalInit(const char *configpath); void GlobalUnInit(); diff --git a/src/client/libcurve_file.h b/src/client/libcurve_file.h index 1f1202bbbb..651a0aeca2 100644 --- a/src/client/libcurve_file.h +++ b/src/client/libcurve_file.h @@ -36,7 +36,7 @@ #include "src/common/concurrent/rw_lock.h" #include "src/client/chunkserver_broadcaster.h" -// TODO(tongguangxun) :添加关键函数trace功能 +// TODO(tongguangxun): Add key function trace function namespace curve { namespace client { @@ -48,28 +48,28 @@ class FileClient { virtual ~FileClient() = default; /** - * file对象初始化函数 - * @param: 配置文件路径 + * file object initialization function + * @param: Configuration file path */ virtual int Init(const std::string& configpath); /** - * 打开或创建文件 - * @param: filename文件名 - * @param: userinfo是操作文件的用户信息 - * @return: 返回文件fd + * Open or create a file + * @param: filename File name + * @param: userinfo is the user information for operating the file + * @return: Return the file fd */ virtual int Open(const std::string& filename, const UserInfo_t& userinfo, const OpenFlags& openflags = {}); /** - * 打开文件,这个打开只是创建了一个fd,并不与mds交互,没有session续约 - * 这个Open接口主要是提供给快照克隆镜像系统做数据拷贝使用 - * @param: filename文件名 - * @param: userinfo当前用户信息 + * Open the file. This only creates an fd and does not interact with mds. 
There is no session renewal + * This Open interface is mainly provided for data copying in snapshot clone image systems + * @param: filename File name + * @param: userinfo Current user information * @param disableStripe enable/disable stripe feature for a stripe file - * @return: 返回文件fd + * @return: Return the file fd */ virtual int Open4ReadOnly(const std::string& filename, const UserInfo_t& userinfo, @@ -87,12 +87,12 @@ class FileClient { const UserInfo_t& userinfo); /** - * 创建文件 - * @param: filename文件名 - * @param: userinfo是当前打开或创建时携带的user信息 - * @param: size文件长度,当create为true的时候以size长度创建文件 - * @return: 成功返回0, 失败可能有多种可能 - * 比如内部错误,或者文件已存在 + * Create File + * @param: filename File name + * @param: userinfo is the user information that is currently carried when opening or creating + * @param: size file length. When create is true, create a file with size length + * @return: Success returns 0, failure may have multiple possibilities + * For example, internal errors or files that already exist */ virtual int Create(const std::string& filename, const UserInfo_t& userinfo, @@ -105,22 +105,22 @@ class FileClient { virtual int Create2(const CreateFileContext& context); /** - * 同步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待读取的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * Synchronous mode reading + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be read + * @param: offset within the file + * @parma: length is the length to be read + * @return: Successfully returned the number of bytes read, otherwise an error code less than 0 will be returned */ virtual int Read(int fd, char* buf, off_t offset, size_t length); /** - * 同步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: buf为当前待写入的缓冲区 - * @param:offset文件内的便宜 - * @parma:length为待读取的长度 - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * Synchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: buf is the current buffer to be written + * @param: offset within the file + * @parma: length is the length to be read + * @return: Successfully returns the number of bytes written, otherwise returns an error code less than 0 */ virtual int Write(int fd, const char* buf, off_t offset, size_t length); @@ -135,21 +135,21 @@ class FileClient { virtual int Discard(int fd, off_t offset, size_t length); /** - * 异步模式读 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode read + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回读取字节数,否则返回小于0的错误码 + * @return: Successfully returned the number of bytes read, otherwise an error code less than 0 will be returned */ virtual int AioRead(int fd, CurveAioContext* aioctx, UserDataType dataType = UserDataType::RawBuffer); /** - * 异步模式写 - * @param: fd为当前open返回的文件描述符 - * @param: aioctx为异步读写的io上下文,保存基本的io信息 + * Asynchronous mode write + * @param: fd is the file descriptor returned by the current open + * @param: aioctx is an asynchronous read/write IO context that stores basic IO information * @param dataType type of aioctx->buf, default is `UserDataType::RawBuffer` - * @return: 成功返回写入字节数,否则返回小于0的错误码 + * @return: Successfully returns the number of bytes written, otherwise returns an error code less than 0 */ virtual int AioWrite(int fd, CurveAioContext* aioctx, 
UserDataType dataType = UserDataType::RawBuffer); @@ -163,30 +163,30 @@ class FileClient { virtual int AioDiscard(int fd, CurveAioContext* aioctx); /** - * 重命名文件 - * @param: userinfo是用户信息 - * @param: oldpath源路劲 - * @param: newpath目标路径 + * Rename a file + * @param: userinfo is the user information + * @param: oldpath Source path + * @param: newpath Target path */ virtual int Rename(const UserInfo_t& userinfo, const std::string& oldpath, const std::string& newpath); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extend a file + * @param: userinfo is the user information + * @param: filename File name + * @param: newsize The new size */ virtual int Extend(const std::string& filename, const UserInfo_t& userinfo, uint64_t newsize); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce=true只能用于从回收站删除,false为放入垃圾箱 + * Delete a file + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce=true can only be used to delete permanently from the recycle bin; false moves the file into the recycle bin */ virtual int Unlink(const std::string& filename, const UserInfo_t& userinfo, @@ -203,35 +203,35 @@ uint64_t fileId); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: filestatVec当前文件夹内的文件信息 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: filestatVec File information of the files in the current directory */ virtual int Listdir(const std::string& dirpath, const UserInfo_t& userinfo, std::vector* filestatVec); /** - * 创建目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 + * Create a directory + * @param: userinfo is the user information + * @param: dirpath is the directory path */ virtual int Mkdir(const std::string& dirpath, const UserInfo_t& userinfo); /** - * 删除目录 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 + * Delete a directory + * @param: userinfo is the user information + * @param: dirpath is the directory path */ virtual int Rmdir(const std::string& dirpath, const UserInfo_t& userinfo); /** - * 获取文件信息 - * @param: filename文件名 - * @param: userinfo是用户信息 - * @param: finfo是出参,携带当前文件的基础信息 - * @return: 成功返回int::OK,否则返回小于0的错误码 + * Obtain file information + * @param: filename File name + * @param: userinfo is the user information + * @param: finfo is an output parameter that carries the basic information of the current file + * @return: Returns int::OK on success, otherwise an error code less than 0 */ virtual int StatFile(const std::string& filename, const UserInfo_t& userinfo, @@ -247,47 +247,47 @@ virtual int StatFile(int fd, FileStatInfo* finfo); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回-LIBCURVE_ERROR::FAILED,-LIBCURVE_ERROR::AUTHFAILED等 + * Change the owner + * @param: filename The file name to be changed + * @param: newOwner The new owner information + * @param: userinfo The user information for performing this operation; only the root user can perform the change + * @return: Returns 0 on success, + * otherwise returns -LIBCURVE_ERROR::FAILED, -LIBCURVE_ERROR::AUTHFAILED, etc. */ virtual int ChangeOwner(const std::string& filename, const std::string& newOwner, const UserInfo_t& userinfo); /** - * close通过fd找到对应的instance进行删除 - * @param: fd为当前open返回的文件描述符 - * @return: 成功返回int::OK,否则返回小于0的错误码 + * close and delete the 
corresponding instance through fd + * @param: fd is the file descriptor returned by the current open + * @return: Success returns int::OK, otherwise an error code less than 0 will be returned */ virtual int Close(int fd); /** - * 析构,回收资源 + * Deconstruct and recycle resources */ virtual void UnInit(); /** - * @brief: 获取集群id - * @param: buf存放集群id - * @param: buf的长度 - * @return: 成功返回0, 失败返回-LIBCURVE_ERROR::FAILED + * @brief: Obtain cluster ID + * @param: buf Storage Cluster ID + * @param: The length of buf + * @return: Success returns 0, failure returns -LIBCURVE_ERROR::FAILED */ int GetClusterId(char* buf, int len); /** - * @brief 获取集群id - * @return 成功返回集群id,失败返回空 + * @brief Get cluster ID + * @return Successfully returned cluster ID, failed returned empty */ std::string GetClusterId(); /** - * @brief 获取文件信息,测试使用 - * @param fd 文件句柄 - * @param[out] finfo 文件信息 - * @return 成功返回0,失败返回-LIBCURVE_ERROR::FAILED + * @brief to obtain file information for testing purposes + * @param fd file handle + * @param[out] finfo file information + * @return success returns 0, failure returns -LIBCURVE_ERROR::FAILED */ int GetFileInfo(int fd, FInfo* finfo); @@ -295,8 +295,8 @@ class FileClient { std::vector ListPoolset(); /** - * 测试使用,获取当前挂载文件数量 - * @return 返回当前挂载文件数量 + * Test usage to obtain the current number of mounted files + * @return Returns the current number of mounted files */ uint64_t GetOpenedFileNum() const { return openedFileNum_.get_value(); @@ -310,18 +310,18 @@ class FileClient { private: BthreadRWLock rwlock_; - // 向上返回的文件描述符,对于QEMU来说,一个vdisk对应一个文件描述符 + // The file descriptor returned upwards, for QEMU, one vdisk corresponds to one file descriptor std::atomic fdcount_; - // 每个vdisk都有一个FileInstance,通过返回的fd映射到对应的instance + // Each vdisk has a FileInstance, which is mapped to the corresponding instance through the returned fd std::unordered_map fileserviceMap_; // std::unordered_map fileserviceFileNameMap_; - // FileClient配置 + // FileClient Configuration ClientConfig clientconfig_; - // fileclient对应的全局mdsclient + // Global mdsclient corresponding to fileclient std::shared_ptr mdsClient_; // chunkserver client @@ -329,10 +329,10 @@ class FileClient { // chunkserver broadCaster std::shared_ptr csBroadCaster_; - // 是否初始化成功 + // Is initialization successful bool inited_; - // 挂载文件数量 + // Number of mounted files bvar::Adder openedFileNum_; }; diff --git a/src/client/libcurve_snapshot.h b/src/client/libcurve_snapshot.h index d8b2ce841a..6c4ce59717 100644 --- a/src/client/libcurve_snapshot.h +++ b/src/client/libcurve_snapshot.h @@ -34,76 +34,76 @@ namespace curve { namespace client { -// SnapshotClient为外围快照系统与MDS和Chunkserver通信的出口 +// SnapshotClient is the exit for peripheral snapshot systems to communicate with MDS and Chunkserver class SnapshotClient { public: SnapshotClient(); ~SnapshotClient() = default; /** - * 初始化函数,外围系统直接传入配置选项 - * @param: opt为外围配置选项 - * @return:0为成功,-1为失败 + * Initialization function, peripheral system directly passes in configuration options + * @param: opt is the peripheral configuration option + * @return: 0 indicates success, -1 indicates failure */ int Init(const ClientConfigOption& opt); /** - * file对象初始化函数 - * @param: 配置文件路径 + * file object initialization function + * @param: Configuration file path */ int Init(const std::string& configpath); /** - * 创建快照 - * @param: userinfo是用户信息 - * @param: filename为要创建快照的文件名 - * @param: seq是出参,获取该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Create a snapshot + * @param: userinfo is the user information + * 
@param: filename is the file name to create the snapshot + * @param: seq is the output parameter to obtain the version information of the file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, uint64_t* seq); /** - * 删除快照 - * @param: userinfo是用户信息 - * @param: filename为要删除的文件名 - * @param: seq该文件的版本信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Delete snapshot + * @param: userinfo is the user information + * @param: filename is the file name to be deleted + * @param: seq The version information of this file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, uint64_t seq); /** - * 获取快照对应的文件信息 - * @param: userinfo是用户信息 - * @param: filename为对应的文件名 - * @param: seq为该文件打快照时对应的版本信息 - * @param: snapinfo是出参,保存当前文件的基础信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain file information corresponding to the snapshot + * @param: userinfo is the user information + * @param: filename is the corresponding file name + * @param: seq corresponds to the version information when taking a snapshot of the file + * @param: snapinfo is a parameter that saves the basic information of the current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int GetSnapShot(const std::string& fname, const UserInfo_t& userinfo, uint64_t seq, FInfo* snapinfo); /** - * 列出当前文件对应版本列表的文件信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seqvec是当前文件的版本列表 - * @param: snapif是出参,获取多个seq号的文件信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * List the file information corresponding to the version list of the current file + * @param: userinfo is the user information + * @param: filenam file name + * @param: seqvec is the version list of the current file + * @param: snapif is a parameter that obtains file information for multiple seq numbers + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, const std::vector* seqvec, std::map* snapif); /** - * 获取快照数据segment信息 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - * @param: offset是文件的偏移 - * @param:segInfo是出参,保存当前文件的快照segment信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain snapshot data segment information + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + * @param: offset is the offset of the file + * @param: segInfo is a parameter that saves the snapshot segment information of the current file + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int GetSnapshotSegmentInfo(const std::string& filename, const UserInfo_t& userinfo, @@ -112,59 +112,59 @@ class SnapshotClient { SegmentInfo *segInfo); /** - * 读取seq版本号的快照数据 - * @param: cidinfo是当前chunk对应的id信息 - * @param: seq是快照版本号 - * @param: offset是快照内的offset - * @param: len是要读取的长度 - * @param: buf是读取缓冲区 - * @param: scc是异步回调 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Read snapshot data of seq version number + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: seq is the snapshot version number + * @param: offset is the offset within the snapshot + * @param: len is the length 
to be read + * @param: buf is a read buffer + * @param: scc is an asynchronous callback + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, uint64_t offset, uint64_t len, char *buf, SnapCloneClosure* scc); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param: cidinfo是当前chunk对应的id信息 - * @param: correctedSeq是chunk需要修正的版本 + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correntSn of the chunk + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: correctedSeq is the version of chunk that needs to be corrected */ int DeleteChunkSnapshotOrCorrectSn(ChunkIDInfo cidinfo, uint64_t correctedSeq); /** - * 获取chunk的版本信息,chunkInfo是出参 - * @param: cidinfo是当前chunk对应的id信息 - * @param: chunkInfo是快照的详细信息 + * Obtain the version information of the chunk, where chunkInfo is the output parameter + * @param: cidinfo is the ID information corresponding to the current chunk + * @param: chunkInfo is the detailed information of the snapshot */ int GetChunkInfo(ChunkIDInfo cidinfo, ChunkInfoDetail *chunkInfo); /** - * 获取快照状态 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 + * Get snapshot status + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information */ int CheckSnapShotStatus(const std::string& filename, const UserInfo_t& userinfo, uint64_t seq, FileStatus* filestatus); /** - * @brief 创建clone文件 + * @brief Create clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged * - * @param source clone源文件名 - * @param: destination clone目标文件名 - * @param: userinfo 用户信息 - * @param: size 文件大小 - * @param: sn 版本号 - * @param: chunksize是要创建文件的chunk大小 + * @param source clone Source file name + * @param: destination clone Destination file name + * @param: userinfo User information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the file to be created * @param stripeUnit stripe size * @param stripeCount stripe count * @param poolset poolset of destination file - * @param[out] fileinfo 创建的目标文件的文件信息 + * @param[out] fileinfo The file information of the target file created * - * @return 错误码 + * @return error code */ int CreateCloneFile(const std::string& source, const std::string& destination, @@ -178,15 +178,15 @@ class SnapshotClient { FInfo* fileinfo); /** - * @brief lazy 创建clone chunk - * @param:location 数据源的url - * @param:chunkidinfo 目标chunk - * @param:sn chunk的序列号 - * @param:chunkSize chunk的大小 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param: scc是异步回调 + * @brief lazy Create clone chunk + * @param: location URL of the data source + * @param: chunkidinfo target chunk + * @param: sn chunk's serial number + * @param: chunkSize Chunk size + * @param: correntSn used to modify the chunk when creating CloneChunk + * @param: scc is an asynchronous callback * - * @return 错误码 + * @return error code */ int CreateCloneChunk(const std::string &location, const ChunkIDInfo &chunkidinfo, uint64_t sn, @@ -194,49 +194,49 @@ class SnapshotClient { SnapCloneClosure* scc); /** - * @brief 实际恢复chunk数据 + * @brief Actual recovery chunk data * - * @param:chunkidinfo chunkidinfo - * @param:offset 偏移 - * @param:len 长度 - * @param: scc是异步回调 + * @param: chunkidinfo 
chunkidinfo + * @param: offset offset + * @param: len length + * @param: scc is an asynchronous callback * - * @return 错误码 + * @return error code */ int RecoverChunk(const ChunkIDInfo &chunkidinfo, uint64_t offset, uint64_t len, SnapCloneClosure* scc); /** - * @brief 通知mds完成Clone Meta + * @brief Notify mds to complete Clone Meta * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination target file + * @param: userinfo User Information * - * @return 错误码 + * @return error code */ int CompleteCloneMeta(const std::string &destination, const UserInfo_t& userinfo); /** - * @brief 通知mds完成Clone Chunk + * @brief Notify mds to complete Clone Chunk * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination target file + * @param: userinfo User Information * - * @return 错误码 + * @return error code */ int CompleteCloneFile(const std::string &destination, const UserInfo_t& userinfo); /** - * 设置clone文件状态 - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 + * Set clone file status + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required * - * @return 错误码 + * @return error code */ int SetCloneFileStatus(const std::string &filename, const FileStatus& filestatus, @@ -244,26 +244,26 @@ class SnapshotClient { uint64_t fileID = 0); /** - * @brief 获取文件信息 + * @brief Get file information * - * @param:filename 文件名 - * @param:userinfo 用户信息 - * @param[out] fileInfo 文件信息 + * @param: filename File name + * @param: userinfo User Information + * @param[out] fileInfo file information * - * @return 错误码 + * @return error code */ int GetFileInfo(const std::string &filename, const UserInfo_t& userinfo, FInfo* fileInfo); /** - * @brief 查询或分配文件segment信息 + * @brief Query or allocate file segment information * - * @param:userinfo 用户信息 - * @param:offset 偏移值 - * @param:segInfo segment信息 + * @param: userinfo User Information + * @param: offset offset value + * @param: segInfo segment information * - * @return 错误码 + * @return error code */ int GetOrAllocateSegmentInfo(bool allocate, uint64_t offset, @@ -271,15 +271,15 @@ class SnapshotClient { SegmentInfo *segInfo); /** - * @brief 为recover rename复制的文件 + * @brief is the file copied for recover rename * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 + * @param: userinfo User Information + * @param: originId The original file ID that was restored + * @param: destinationId The cloned target file ID + * @param: origin The original file name of the recovered file + * @param: destination The cloned target file * - * @return 错误码 + * @return error code */ int RenameCloneFile(const UserInfo_t& userinfo, uint64_t originId, @@ -288,42 +288,42 @@ class SnapshotClient { const std::string &destination); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds + * Delete files + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: id is the file id, with a default value of 0. 
If the user does not specify this value, the id will not be passed to mds */ int DeleteFile(const std::string& filename, const UserInfo_t& userinfo, uint64_t id = 0); /** - * 析构,回收资源 + * Deconstruct and recycle resources */ void UnInit(); /** - * 获取iomanager信息,测试代码使用 + * Obtain iomanager information and test code usage */ IOManager4Chunk* GetIOManager4Chunk() {return &iomanager4chunk_;} private: /** - * 获取logicalpool中copyset的serverlist - * @param: lpid是逻辑池id - * @param: csid是逻辑池中的copysetid数据集 - * @return: 成功返回LIBCURVE_ERROR::OK,否则LIBCURVE_ERROR::FAILED + * Obtain the serverlist of copyset in the logicalpool + * @param: lpid is the logical pool id + * @param: csid is the copysetid dataset in the logical pool + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise LIBCURVE_ERROR::FAILED */ int GetServerList(const LogicPoolID& lpid, const std::vector& csid); private: - // MDSClient负责与Metaserver通信,所有通信都走这个接口 + // MDSClient is responsible for communicating with Metaserver, and all communication goes through this interface MDSClient mdsclient_; - // IOManager4Chunk用于管理发向chunkserver端的IO + // IOManager4Chunk is used to manage IO sent to the chunkserver end IOManager4Chunk iomanager4chunk_; - // 用于client 配置读取 + // Used for client configuration reading ClientConfig clientconfig_; }; } // namespace client diff --git a/src/client/mds_client.cpp b/src/client/mds_client.cpp index b2827b0ec4..d3b14d7cc6 100644 --- a/src/client/mds_client.cpp +++ b/src/client/mds_client.cpp @@ -45,21 +45,21 @@ using curve::mds::StatusCode; using curve::common::ChunkServerLocation; using curve::mds::topology::CopySetServerInfo; -// rpc发送和mds地址切换状态机 +// Rpc sending and mds address switching state machine int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { - // 记录上一次正在服务的mds index + // Record the last serving mds index int lastWorkingMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前正在使用的mds index + // Record the currently used mds index int curRetryMDSIndex = currentWorkingMDSAddrIndex_; - // 记录当前mds重试的次数 + // Record the number of current mds retries uint64_t currentMDSRetryCount = 0; - // 执行起始时间点 + // Execution start time point uint64_t startTime = TimeUtility::GetTimeofDayMs(); - // rpc超时时间 + // RPC timeout uint64_t rpcTimeOutMS = retryOpt_.rpcTimeoutMs; // The count of normal retry @@ -68,16 +68,16 @@ int RPCExcutorRetryPolicy::DoRPCTask(RPCFunc rpctask, uint64_t maxRetryTimeMS) { int retcode = -1; bool retryUnlimit = (maxRetryTimeMS == 0); while (GoOnRetry(startTime, maxRetryTimeMS)) { - // 1. 创建当前rpc需要使用的channel和controller,执行rpc任务 + // 1. Create the channels and controllers required for the current RPC and execute the RPC task retcode = ExcuteTask(curRetryMDSIndex, rpcTimeOutMS, rpctask); - // 2. 根据rpc返回值进行预处理 + // 2. Preprocessing based on rpc return value if (retcode < 0) { curRetryMDSIndex = PreProcessBeforeRetry( retcode, retryUnlimit, &normalRetryCount, ¤tMDSRetryCount, curRetryMDSIndex, &lastWorkingMDSIndex, &rpcTimeOutMS); continue; - // 3. 此时rpc是正常返回的,更新当前正在服务的mds地址index + // 3. At this point, rpc returns normally and updates the index of the currently serving mds address } else { currentWorkingMDSAddrIndex_.store(curRetryMDSIndex); break; @@ -115,44 +115,44 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, bthread_usleep(retryOpt_.waitSleepMs * 1000); } - // 1. 访问存在的IP地址,但无人监听:ECONNREFUSED - // 2. 正常发送RPC情况下,对端进程挂掉了:EHOSTDOWN - // 3. 对端server调用了Stop:ELOGOFF - // 4. 对端链接已关闭:ECONNRESET - // 5. 在一个mds节点上rpc失败超过限定次数 - // 在这几种场景下,主动切换mds。 + // 1. 
Access to an existing IP address, but no one is listening: ECONNREFUSED + // 2. In the normal RPC scenario, the remote process has crashed: EHOSTDOWN + // 3. The remote server called Stop: ELOGOFF + // 4. The remote connection has been closed: ECONNRESET + // 5. RPC failures on a single MDS node exceed the specified limit. + // In these scenarios, actively switch the MDS. } else if (status == -EHOSTDOWN || status == -ECONNRESET || status == -ECONNREFUSED || status == -brpc::ELOGOFF || *curMDSRetryCount >= retryOpt_.maxFailedTimesBeforeChangeAddr) { needChangeMDS = true; - // 在开启健康检查的情况下,在底层tcp连接失败时 - // rpc请求会本地直接返回 EHOSTDOWN - // 这种情况下,增加一些睡眠时间,避免大量的重试请求占满bthread - // TODO(wuhanqing): 关闭健康检查 + // When health checks are enabled, in the event of a failure in the underlying TCP connection, + // RPC requests will directly return EHOSTDOWN locally. In this situation, + // add some sleep time to avoid a large number of retry requests overwhelming bthread. + // TODO(wuhanqing): Disable health checks. if (status == -EHOSTDOWN) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } } else if (status == -brpc::ERPCTIMEDOUT || status == -ETIMEDOUT) { rpcTimeout = true; needChangeMDS = false; - // 触发超时指数退避 + // Trigger timeout index backoff *timeOutMS *= 2; *timeOutMS = std::min(*timeOutMS, retryOpt_.maxRPCTimeoutMS); *timeOutMS = std::max(*timeOutMS, retryOpt_.rpcTimeoutMs); } - // 获取下一次需要重试的mds索引 + // Obtain the mds index that needs to be retried next time nextMDSIndex = GetNextMDSIndex(needChangeMDS, curRetryMDSIndex, lastWorkingMDSIndex); // NOLINT - // 更新curMDSRetryCount和rpctimeout + // Update curMDSRetryCount and rpctimeout if (nextMDSIndex != curRetryMDSIndex) { *curMDSRetryCount = 0; *timeOutMS = retryOpt_.rpcTimeoutMs; } else { ++(*curMDSRetryCount); - // 还是在当前mds上重试,且rpc不是超时错误,就进行睡眠,然后再重试 + // Try again on the current mds, and if the rpc is not a timeout error, go to sleep and try again if (!rpcTimeout) { bthread_usleep(retryOpt_.rpcRetryIntervalUS); } @@ -161,16 +161,16 @@ int RPCExcutorRetryPolicy::PreProcessBeforeRetry(int status, bool retryUnlimit, return nextMDSIndex; } /** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 + * Obtain the next MDS index to retry based on the input state. The MDS switching logic is as follows: + * Record three states: curRetryMDSIndex, lastWorkingMDSIndex, * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex + * 1. At the beginning, curRetryMDSIndex = currentWorkingMDSIndex * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ + * 2. If an RPC fails, it triggers a switch to curRetryMDSIndex. If at this point, lastWorkingMDSIndex is equal to + * currentWorkingMDSIndex, then sequentially switch to the next MDS index. + * If lastWorkingMDSIndex is not equal to currentWorkingMDSIndex, + * it means that another interface has updated currentWorkingMDSAddrIndex_, + * so this time, switch directly to currentWorkingMDSAddrIndex_. */ int RPCExcutorRetryPolicy::GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, @@ -200,7 +200,7 @@ int RPCExcutorRetryPolicy::ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, int ret = channel.Init(mdsaddr.c_str(), nullptr); if (ret != 0) { LOG(WARNING) << "Init channel failed! 
addr = " << mdsaddr; - // 返回EHOSTDOWN给上层调用者,促使其切换mds + // Return EHOSTDOWN to the upper level caller, prompting them to switch mds return -EHOSTDOWN; } diff --git a/src/client/mds_client.h b/src/client/mds_client.h index 36822fa31c..09e8a79b40 100644 --- a/src/client/mds_client.h +++ b/src/client/mds_client.h @@ -54,22 +54,22 @@ class RPCExcutorRetryPolicy { using RPCFunc = std::function; /** - * 将client与mds的重试相关逻辑抽离 - * @param: task为当前要进行的具体rpc任务 - * @param: maxRetryTimeMS是当前执行最大的重试时间 - * @return: 返回当前RPC的结果 + * Detach the retry related logic between client and mds + * @param: task is the specific rpc task to be carried out currently + * @param: maxRetryTimeMS is the maximum retry time currently executed + * @return: Returns the result of the current RPC */ int DoRPCTask(RPCFunc task, uint64_t maxRetryTimeMS); /** - * 测试使用: 设置当前正在服务的mdsindex + * Test usage: Set the currently serving mdsindex */ void SetCurrentWorkIndex(int index) { currentWorkingMDSAddrIndex_.store(index); } /** - * 测试使用:获取当前正在服务的mdsindex + * Test usage: Obtain the currently serving mdsindex */ int GetCurrentWorkIndex() const { return currentWorkingMDSAddrIndex_.load(); @@ -77,89 +77,86 @@ class RPCExcutorRetryPolicy { private: /** - * rpc失败需要重试,根据cntl返回的不同的状态,确定应该做什么样的预处理。 - * 主要做了以下几件事: - * 1. 如果上一次的RPC是超时返回,那么执行rpc 超时指数退避逻辑 - * 2. 如果上一次rpc返回not connect等返回值,会主动触发切换mds地址重试 - * 3. 更新重试信息,比如在当前mds上连续重试的次数 - * @param[in]: status为当前rpc的失败返回的状态 - * @param normalRetryCount The total count of normal retry - * @param[in][out]: curMDSRetryCount当前mds节点上的重试次数,如果切换mds - * 该值会被重置为1. - * @param[in]: curRetryMDSIndex代表当前正在重试的mds索引 - * @param[out]: lastWorkingMDSIndex上一次正在提供服务的mds索引 - * @param[out]: timeOutMS根据status对rpctimeout进行调整 + * When an RPC fails, it needs to be retried, and based on different statuses returned by `cntl`, + * determine what kind of preprocessing should be done. The main tasks performed are as follows: + * 1. If the last RPC timed out, execute RPC timeout exponential backoff logic. + * 2. If the last RPC returned values like "not connect," it will actively trigger MDS address switching and retry. + * 3. Update retry information, such as the number of consecutive retries on the current MDS. + * @param[in]: status is the status of the current RPC failure. + * @param[in]: normalRetryCount is the total count of normal retries. + * @param[in][out]: curMDSRetryCount is the number of retries on the current MDS node. If MDS switching occurs, + * this value will be reset to 1. + * @param[in]: curRetryMDSIndex represents the current MDS index being retried. + * @param[out]: lastWorkingMDSIndex is the index of the MDS that was providing service in the last attempt. + * @param[out]: timeOutMS is adjusted based on the status to control the RPC timeout. * - * @return: 返回下一次重试的mds索引 + * @return: Returns the next MDS index for the next retry. */ int PreProcessBeforeRetry(int status, bool retryUnlimit, uint64_t *normalRetryCount, uint64_t *curMDSRetryCount, int curRetryMDSIndex, int *lastWorkingMDSIndex, uint64_t *timeOutMS); /** - * 执行rpc发送任务 - * @param[in]: mdsindex为mds对应的地址索引 - * @param[in]: rpcTimeOutMS是rpc超时时间 - * @param[in]: task为待执行的任务 - * @return: channel获取成功则返回0,否则-1 + * Execute rpc send task + * @param[in]: mdsindex is the address index corresponding to mds + * @param[in]: rpcTimeOutMS is the rpc timeout time + * @param[in]: task is the task to be executed + * @return: If the channel is successfully obtained, 0 will be returned. 
Otherwise, -1 */ int ExcuteTask(int mdsindex, uint64_t rpcTimeOutMS, RPCExcutorRetryPolicy::RPCFunc task); /** - * 根据输入状态获取下一次需要重试的mds索引,mds切换逻辑: - * 记录三个状态:curRetryMDSIndex、lastWorkingMDSIndex、 - * currentWorkingMDSIndex - * 1. 开始的时候curRetryMDSIndex = currentWorkingMDSIndex - * lastWorkingMDSIndex = currentWorkingMDSIndex - * 2. - * 如果rpc失败,会触发切换curRetryMDSIndex,如果这时候lastWorkingMDSIndex - * 与currentWorkingMDSIndex相等,这时候会顺序切换到下一个mds索引, - * 如果lastWorkingMDSIndex与currentWorkingMDSIndex不相等,那么 - * 说明有其他接口更新了currentWorkingMDSAddrIndex_,那么本次切换 - * 直接切换到currentWorkingMDSAddrIndex_ - * @param[in]: needChangeMDS表示当前外围需不需要切换mds,这个值由 - * PreProcessBeforeRetry函数确定 - * @param[in]: currentRetryIndex为当前正在重试的mds索引 - * @param[in][out]: - * lastWorkingindex为上一次正在服务的mds索引,正在重试的mds - * 与正在服务的mds索引可能是不同的mds。 - * @return: 返回下一次要重试的mds索引 + * Get the next MDS index to retry based on the input state. MDS switching logic: + * Record three states: curRetryMDSIndex, lastWorkingMDSIndex, currentWorkingMDSIndex. + * 1. At the beginning, curRetryMDSIndex = currentWorkingMDSIndex, + * lastWorkingMDSIndex = currentWorkingMDSIndex. + * 2. If an RPC fails, it will trigger a switch of curRetryMDSIndex. If at this point, + * lastWorkingMDSIndex is equal to currentWorkingMDSIndex, it will sequentially switch + * to the next MDS index. If lastWorkingMDSIndex is not equal to currentWorkingMDSIndex, + * it means that another interface has updated currentWorkingMDSAddrIndex_. In this case, + * the switch will directly go to currentWorkingMDSAddrIndex_. + * @param[in]: needChangeMDS indicates whether the current peripheral needs to switch MDS. + * This value is determined by the PreProcessBeforeRetry function. + * @param[in]: currentRetryIndex is the current MDS index being retried. + * @param[in][out]: lastWorkingIndex is the index of the last MDS being served in the last retry. + * The MDS being retried and the MDS being served may be different. + * @return: Returns the next MDS index to retry. */ int GetNextMDSIndex(bool needChangeMDS, int currentRetryIndex, int *lastWorkingindex); /** - * 根据输入参数,决定是否继续重试,重试退出条件是重试时间超出最大允许时间 - * IO路径上和非IO路径上的重试时间不一样,非IO路径的重试时间由配置文件的 - * mdsMaxRetryMS参数指定,IO路径为无限循环重试。 + * Based on the input parameters, decide whether to continue retry. The condition for retry exit is that the retry time exceeds the maximum allowed time + * The retry time on IO paths is different from that on non IO paths, and the retry time on non IO paths is determined by the configuration file + * The mdsMaxRetryMS parameter specifies that the IO path is an infinite loop retry. 
* @param[in]: startTimeMS - * @param[in]: maxRetryTimeMS为最大重试时间 - * @return:需要继续重试返回true, 否则返回false + * @param[in]: maxRetryTimeMS is the maximum retry time + * @return: Need to continue retrying and return true, otherwise return false */ bool GoOnRetry(uint64_t startTimeMS, uint64_t maxRetryTimeMS); /** - * 递增controller id并返回id + *Increment controller id and return id */ uint64_t GetLogId() { return cntlID_.fetch_add(1, std::memory_order_relaxed); } private: - // 执行rpc时必要的配置信息 + // Necessary configuration information for executing rpc MetaServerOption::RpcRetryOption retryOpt_; - // 记录上一次重试过的leader信息 + // Record the leader information from the last retry std::atomic currentWorkingMDSAddrIndex_; - // controller id,用于trace整个rpc IO链路 - // 这里直接用uint64即可,在可预测的范围内,不会溢出 + // controller ID, used to trace the entire RPC IO link + // Simply use uint64 here, within a predictable range, without overflow std::atomic cntlID_; }; struct LeaseRefreshResult; -// MDSClient是client与MDS通信的唯一窗口 +// MDSClient is the only window where the client communicates with MDS class MDSClient : public MDSClientBase, public std::enable_shared_from_this { public: @@ -170,12 +167,12 @@ class MDSClient : public MDSClientBase, LIBCURVE_ERROR Initialize(const MetaServerOption &metaopt); /** - * 创建文件 - * @param: context创建文件信息 - * @return: 成功返回LIBCURVE_ERROR::OK - * 文件已存在返回LIBCURVE_ERROR::EXIST - * 否则返回LIBCURVE_ERROR::FAILED - * 如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, + * Create File + * @param: context Create file information + * @return: Successfully returned LIBCURVE_ERROR::OK + * File already exists Return LIBCURVE_ERROR::EXIST + * Otherwise, return LIBCURVE_ERROR::FAILED + * If authentication fails, return LIBCURVE_ERROR::AUTHFAIL */ LIBCURVE_ERROR CreateFile(const CreateFileContext& context); /** @@ -196,11 +193,11 @@ class MDSClient : public MDSClientBase, LeaseSession *lease); /** - * 获取copysetid对应的serverlist信息并更新到metacache - * @param: logicPoolId逻辑池信息 - * @param: csid为要获取的copyset列表 - * @param: cpinfoVec保存获取到的server信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtain the serverlist information corresponding to the copysetid and update it to the metacache + * @param: logicPoolId Logical Pool Information + * @param: csid is the list of copysets to obtain + * @param: cpinfoVec saves the obtained server information + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise will be returned LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR GetServerList(const LogicPoolID &logicPoolId, @@ -208,9 +205,9 @@ class MDSClient : public MDSClientBase, std::vector> *cpinfoVec); /** - * 获取当前mds所属的集群信息 - * @param[out]: clsctx 为要获取的集群信息 - * @return: 成功返回LIBCURVE_ERROR::OK,否则返回LIBCURVE_ERROR::FAILED + * Obtain the cluster information to which the current mds belongs + * @param[out]: clsctx is the cluster information to be obtained + * @return: Successfully returned LIBCURVE_ERROR::OK, otherwise will be returned LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR GetClusterInfo(ClusterContext *clsctx); @@ -276,19 +273,19 @@ class MDSClient : public MDSClientBase, std::list> *csLocs); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size + * Extension file + * @param: userinfo is the user information + * @param: filename File name + * @param: newsize New size */ LIBCURVE_ERROR Extend(const std::string &filename, const UserInfo_t &userinfo, uint64_t newsize); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce是否强制删除而不放入垃圾回收站 - * @param: 
id为文件id,默认值为0,如果用户不指定该值,不会传id到mds + * Delete files + * @param: userinfo is the user information + * @param: filename The file name to be deleted + * @param: deleteforce Does it force deletion without placing it in the garbage bin + * @param: id is the file id, with a default value of 0. If the user does not specify this value, the id will not be passed to mds */ LIBCURVE_ERROR DeleteFile(const std::string &filename, const UserInfo_t &userinfo, @@ -304,78 +301,78 @@ class MDSClient : public MDSClientBase, const UserInfo_t &userinfo, uint64_t fileId); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param: seq是出参,返回创建快照时文件的版本信息 + * Create a snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param: seq is an output parameter that returns the version information of the file when creating the snapshot * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return LIBCURVE_ERROR::AUTHFAIL, + * Otherwise, return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR CreateSnapShot(const std::string &filename, const UserInfo_t &userinfo, uint64_t *seq); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 + * Delete snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to be snapshot + * @param: seq is the version information of the file when creating the snapshot * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return LIBCURVE_ERROR::AUTHFAIL, + * Otherwise, return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR DeleteSnapShot(const std::string &filename, const UserInfo_t &userinfo, uint64_t seq); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: snapif是出参,保存文件的基本信息 + * Obtain snapshot file information with version number seq in the form of a list, where snapif is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the snapshot + * @param: snapif is a parameter that saves the basic information of the file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return LIBCURVE_ERROR::AUTHFAIL, + * Otherwise, return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR ListSnapShot(const std::string &filename, const UserInfo_t &userinfo, const std::vector *seq, std::map *snapif); /** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param: segInfo是出参,保存chunk信息 + * Obtain the chunk information of the snapshot and update it to the metacache, where segInfo is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the snapshot + * @param: offset is the offset within the file + * @param: segInfo is the output parameter, saving chunk information * @return: - * 
成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return LIBCURVE_ERROR::AUTHFAIL, + * Otherwise, return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR GetSnapshotSegmentInfo(const std::string &filename, const UserInfo_t &userinfo, uint64_t seq, uint64_t offset, SegmentInfo *segInfo); /** - * 获取快照状态 - * @param: filenam文件名 - * @param: userinfo是用户信息 - * @param: seq是文件版本号信息 - * @param[out]: filestatus为快照状态 + * Get snapshot status + * @param: filenam file name + * @param: userinfo is the user information + * @param: seq is the file version number information + * @param[out]: filestatus is the snapshot status */ LIBCURVE_ERROR CheckSnapShotStatus(const std::string &filename, const UserInfo_t &userinfo, uint64_t seq, FileStatus *filestatus); /** - * 文件接口在打开文件的时候需要与mds保持心跳,refresh用来续约 - * 续约结果将会通过LeaseRefreshResult* resp返回给调用层 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 - * @param: resp是mds端传递过来的lease信息 - * @param[out]: lease当前文件的session信息 + * The file interface needs to maintain a heartbeat with MDS when opening files, and refresh is used to renew the contract + * The renewal result will be returned to the calling layer through LeaseRefreshResult* resp + * @param: filename is the file name to be renewed + * @param: sessionid is the session information of the file + * @param: resp is the release information passed from the mds end + * @param[out]: lease the session information of the current file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return LIBCURVE_ERROR::AUTHFAIL, + * Otherwise, return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR RefreshSession(const std::string &filename, const UserInfo_t &userinfo, @@ -383,34 +380,34 @@ class MDSClient : public MDSClientBase, LeaseRefreshResult *resp, LeaseSession *lease = nullptr); /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 + * To close the file, it is necessary to carry the session ID, so that the mds side will delete the session information in the database + * @param: filename is the file name to be renewed + * @param: sessionid is the session information of the file * @return: - * 成功返回LIBCURVE_ERROR::OK,如果认证失败返回LIBCURVE_ERROR::AUTHFAIL, - * 否则返回LIBCURVE_ERROR::FAILED + * Successfully returned LIBCURVE_ERROR::OK, if authentication fails, return LIBCURVE_ERROR::AUTHFAIL, + * Otherwise, return LIBCURVE_ERROR::FAILED */ LIBCURVE_ERROR CloseFile(const std::string &filename, const UserInfo_t &userinfo, const std::string &sessionid); /** - * @brief 创建clone文件 + * @brief Create clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged * - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * @param source Clone source file name + * @param: destination clone Destination file name + * @param: userinfo User Information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the created file * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out] destFileId 创建的目标文件的Id + * @param[out] destFileId The ID of the target file created * - * @return 错误码 + * @return error code */ LIBCURVE_ERROR 
CreateCloneFile(const std::string &source, const std::string &destination, @@ -421,36 +418,36 @@ class MDSClient : public MDSClientBase, FInfo *fileinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Notify mds that Clone Meta is complete * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination is the target file + * @param: userinfo is the user information * - * @return 错误码 + * @return error code */ LIBCURVE_ERROR CompleteCloneMeta(const std::string &destination, const UserInfo_t &userinfo); /** - * @brief 通知mds完成Clone Chunk + * @brief Notify mds that Clone Chunk is complete * - * @param:destination 目标文件 - * @param:userinfo用户信息 + * @param: destination is the target file + * @param: userinfo is the user information * - * @return 错误码 + * @return error code */ LIBCURVE_ERROR CompleteCloneFile(const std::string &destination, const UserInfo_t &userinfo); /** - * @brief 通知mds完成Clone Meta + * @brief Set the clone file status * - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 + * @param: filename is the target file + * @param: filestatus is the target status to be set + * @param: userinfo is the user information + * @param: fileId is the file ID information, optional * - * @return 错误码 + * @return error code */ LIBCURVE_ERROR SetCloneFileStatus(const std::string &filename, const FileStatus &filestatus, @@ -458,15 +455,15 @@ class MDSClient : public MDSClientBase, uint64_t fileID = 0); /** - * @brief 重名文件 + * @brief Rename a file * - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 + * @param: userinfo is the user information + * @param: originId is the ID of the original file being recovered + * @param: destinationId is the ID of the cloned destination file + * @param: origin is the name of the original file being recovered + * @param: destination is the cloned destination file * - * @return 错误码 + * @return error code */ LIBCURVE_ERROR RenameFile(const UserInfo_t &userinfo, const std::string &origin, @@ -475,64 +472,64 @@ class MDSClient : public MDSClientBase, uint64_t destinationId = 0); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Change the owner + * @param: filename is the file whose owner is to be changed + * @param: newOwner is the new owner information + * @param: userinfo is the user performing this operation; only the root user can change the owner + * @return: Returns 0 on success, + * otherwise returns LIBCURVE_ERROR::FAILED, LIBCURVE_ERROR::AUTHFAILED, etc. */ LIBCURVE_ERROR ChangeOwner(const std::string &filename, const std::string &newOwner, const UserInfo_t &userinfo); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: filestatVec当前文件夹内的文件信息 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: filestatVec is the file information of the entries in the directory */ LIBCURVE_ERROR Listdir(const std::string &dirpath, const UserInfo_t &userinfo, std::vector *filestatVec); /** - * 向mds注册client metric监听的地址和端口 - * @param: ip客户端ip - * @param: dummyServerPort为监听端口 - * @return: 成功返回0, - * 否则返回LIBCURVE_ERROR::FAILED,LIBCURVE_ERROR::AUTHFAILED等 + * Register the address and port that the client metric service listens on with mds + * @param: ip is the client IP + * @param: dummyServerPort is the listening port + * @return: Returns 0 on success, + * otherwise returns 
LIBCURVE_ERROR::FAILED, LIBCURVE_ERROR::AUTHFAILED, etc. */ LIBCURVE_ERROR Register(const std::string &ip, uint16_t port); /** - * 获取chunkserver信息 - * @param[in] addr chunkserver地址信息 - * @param[out] chunkserverInfo 待获取的信息 - * @return:成功返回ok + * Obtain chunkserver information + * @param[in] addr is the chunkserver address information + * @param[out] chunkserverInfo is the information to be obtained + * @return: Returns OK on success */ LIBCURVE_ERROR GetChunkServerInfo(const PeerAddr &addr, CopysetPeerInfo *chunkserverInfo); /** - * 获取server上所有chunkserver的id - * @param[in]: ip为server的ip地址 - * @param[out]: csIds用于保存chunkserver的id - * @return: 成功返回LIBCURVE_ERROR::OK,失败返回LIBCURVE_ERROR::FAILED + * Obtain the IDs of all chunkservers on the server + * @param[in]: ip is the IP address of the server + * @param[out]: csIds is used to store the chunkserver IDs + * @return: Returns LIBCURVE_ERROR::OK on success, LIBCURVE_ERROR::FAILED on failure */ LIBCURVE_ERROR ListChunkServerInServer(const std::string &ip, std::vector *csIds); /** - * 析构,回收资源 + * Destructor; releases resources */ void UnInitialize(); /** - * 将mds侧错误码对应到libcurve错误码 - * @param: statecode为mds一侧错误码 - * @param[out]: 出参errcode为libcurve一侧的错误码 + * Map the mds-side error code to the libcurve error code + * @param: statecode is the error code on the mds side + * @param[out]: errcode is the corresponding error code on the libcurve side */ void MDSStatusCode2LibcurveError(const ::curve::mds::StatusCode &statcode, LIBCURVE_ERROR *errcode); @@ -540,13 +537,13 @@ LIBCURVE_ERROR ReturnError(int retcode); private: - // 初始化标志,放置重复初始化 + // Initialization flag, used to prevent duplicate initialization bool inited_ = false; - // 当前模块的初始化option配置 + // Initialization option configuration of the current module MetaServerOption metaServerOpt_; - // client与mds通信的metric统计 + // Metric statistics for the communication between client and mds MDSClientMetric mdsClientMetric_; RPCExcutorRetryPolicy rpcExcutor_; diff --git a/src/client/mds_client_base.h b/src/client/mds_client_base.h index 64178e43e9..8bef7f6e90 100644 --- a/src/client/mds_client_base.h +++ b/src/client/mds_client_base.h @@ -92,18 +92,18 @@ using curve::mds::topology::ListPoolsetResponse; extern const char* kRootUserName; -// MDSClientBase将所有与mds的RPC接口抽离,与业务逻辑解耦 -// 这里只负责rpc的发送,具体的业务处理逻辑通过reponse和controller向上 -// 返回给调用者,有调用者处理 +// MDSClientBase abstracts all RPC interfaces to the MDS and decouples them from the business logic. +// It is only responsible for sending the RPCs; the responses and controllers are handed back to +// the caller, which performs the actual business handling (see the illustrative sketch below). 
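+// Illustrative sketch (an assumption-based example, not code from this header): a caller such as
+// MDSClient typically wraps one of these helpers in RPCExcutorRetryPolicy::DoRPCTask, so that retry
+// and mds switching stay in the retry policy while response handling stays with the caller.
+// Assuming RPCFunc receives the mds index, the rpc timeout, a channel and a controller (as
+// ExcuteTask suggests), the pattern looks roughly like:
+//
+//   auto task = [&](int mdsindex, uint64_t rpctimeoutMS,
+//                   brpc::Channel* channel, brpc::Controller* cntl) -> int {
+//       GetFileInfoResponse response;  // response type assumed for illustration
+//       MDSClientBase::GetFileInfo(filename, userinfo, &response, cntl, channel);
+//       if (cntl->Failed()) {
+//           return -cntl->ErrorCode();  // a negative code drives retry / mds switching
+//       }
+//       // interpret the response status code here, in the caller
+//       return LIBCURVE_ERROR::OK;
+//   };
+//   rpcExcutor_.DoRPCTask(task, metaServerOpt_.mdsMaxRetryMS);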
class MDSClientBase { public: /** - * 打开文件 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Open File + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void OpenFile(const std::string& filename, const UserInfo_t& userinfo, @@ -112,27 +112,27 @@ class MDSClientBase { brpc::Channel* channel); /** - * 创建文件 - * @param: filename创建文件的文件名 - * @param: userinfo为user信息 - * @param: size文件长度 - * @param: normalFile表示创建的是普通文件还是目录文件,如果是目录则忽略size - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create File + * @param: filename The file name used to create the file + * @param: userinfo is the user information + * @param: size File length + * @param: normalFile indicates whether the created file is a regular file or a directory file. If it is a directory, size is ignored + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void CreateFile(const CreateFileContext& context, CreateFileResponse* response, brpc::Controller* cntl, brpc::Channel* channel); /** - * 关闭文件,需要携带sessionid,这样mds端会在数据库删除该session信息 - * @param: filename是要续约的文件名 - * @param: userinfo为user信息 - * @param: sessionid是文件的session信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * To close the file, it is necessary to carry the sessionid, so that the mds side will delete the session information in the database + * @param: filename is the file name to be renewed + * @param: userinfo is the user information + * @param: sessionid is the session information of the file + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void CloseFile(const std::string& filename, const UserInfo_t& userinfo, @@ -141,12 +141,12 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取文件信息,fi是出参 - * @param: filename是文件名 - * @param: userinfo为user信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain file information, where fi is the output parameter + * @param: filename is the file name + * @param: userinfo is the user information + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void GetFileInfo(const std::string& filename, const UserInfo_t& userinfo, @@ -171,12 +171,12 @@ class MDSClientBase { brpc::Channel* channel); /** - * 创建版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要创建快照的文件名 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Create a snapshot with 
version number seq + * @param: userinfo is the user information + * @param: filename is the file name to create the snapshot + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void CreateSnapShot(const std::string& filename, const UserInfo_t& userinfo, @@ -184,13 +184,13 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 删除版本号为seq的快照 - * @param: userinfo是用户信息 - * @param: filename是要快照的文件名 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Delete snapshot with version number seq + * @param: userinfo is the user information + * @param: filename is the file name to be snapshot + * @param: seq is the version information of the file when creating the snapshot + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void DeleteSnapShot(const std::string& filename, const UserInfo_t& userinfo, @@ -199,13 +199,13 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 以列表的形式获取版本号为seq的snapshot文件信息,snapif是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain snapshot file information with version number seq in the form of a list, where snapif is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the snapshot + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void ListSnapShot(const std::string& filename, const UserInfo_t& userinfo, @@ -214,14 +214,14 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取快照的chunk信息并更新到metacache,segInfo是出参 - * @param: filename是要快照的文件名 - * @param: userinfo是用户信息 - * @param: seq是创建快照时文件的版本信息 - * @param: offset是文件内的偏移 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain the chunk information of the snapshot and update it to the metacache, where segInfo is the output parameter + * @param: filename is the file name to be snapshot + * @param: userinfo is the user information + * @param: seq is the version information of the file when creating the snapshot + * @param: offset is the offset within the file + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void GetSnapshotSegmentInfo(const std::string& filename, const UserInfo_t& userinfo, @@ -232,13 +232,13 @@ class MDSClientBase { brpc::Channel* channel); /** - * 文件接口在打开文件的时候需要与mds保持心跳,refresh用来续约 - * 续约结果将会通过LeaseRefreshResult* resp返回给调用层 - * @param: filename是要续约的文件名 - * @param: sessionid是文件的session信息 - * 
@param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * The file interface needs to maintain a heartbeat with MDS when opening files, and refresh is used to renew the contract + * The renewal result will be returned to the calling layer through LeaseRefreshResult* resp + * @param: filename is the file name to be renewed + * @param: sessionid is the session information of the file + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void RefreshSession(const std::string& filename, const UserInfo_t& userinfo, @@ -247,13 +247,13 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取快照状态 - * @param: filenam文件名 - * @param: userinfo是用户信息 - * @param: seq是文件版本号信息 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Get snapshot status + * @param: filenam file name + * @param: userinfo is the user information + * @param: seq is the file version number information + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void CheckSnapShotStatus(const std::string& filename, const UserInfo_t& userinfo, @@ -262,12 +262,12 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取copysetid对应的serverlist信息并更新到metacache - * @param: logicPoolId逻辑池信息 - * @param: copysetidvec为要获取的copyset列表 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain the serverlist information corresponding to the copysetid and update it to the metacache + * @param: logicPoolId Logical Pool Information + * @param: copysetidvec is the list of copysets to obtain + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void GetServerList(const LogicPoolID& logicalpooid, const std::vector& copysetidvec, @@ -276,10 +276,10 @@ class MDSClientBase { brpc::Channel* channel); /** - * 获取mds对应的cluster id - * @param[out]: response为该rpc的respoonse,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]: channel是当前与mds建立的通道 + * Obtain the cluster ID corresponding to the mds + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void GetClusterInfo(GetClusterInfoResponse* response, brpc::Controller* cntl, @@ -290,18 +290,18 @@ class MDSClientBase { brpc::Channel* channel); /** - * 创建clone文件 - * @param source 克隆源文件名 - * @param:destination clone目标文件名 - * @param:userinfo 用户信息 - * @param:size 文件大小 - * @param:sn 版本号 - * @param:chunksize是创建文件的chunk大小 + * Create clone file + * @param source Clone source file name + * @param: destination clone Destination file name + * @param: userinfo User Information + * @param: size File size + * @param: sn version number + * @param: chunksize is the chunk size of the created file * @param 
stripeUnit stripe size * @param stripeCount stripe count - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void CreateCloneFile(const std::string& source, const std::string& destination, @@ -317,14 +317,14 @@ class MDSClientBase { brpc::Channel* channel); /** - * @brief 通知mds完成Clone Meta - * @param: filename 目标文件 - * @param: filestatus为要设置的目标状态 - * @param: userinfo用户信息 - * @param: fileId为文件ID信息,非必填 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @brief Notify mds to complete Clone Meta + * @param: filename Target file + * @param: filestatus is the target state to be set + * @param: userinfo User information + * @param: fileId is the file ID information, not required + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void SetCloneFileStatus(const std::string& filename, const FileStatus& filestatus, @@ -357,15 +357,15 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * @brief 重名文件 - * @param:userinfo 用户信息 - * @param:originId 被恢复的原始文件Id - * @param:destinationId 克隆出的目标文件Id - * @param:origin 被恢复的原始文件名 - * @param:destination 克隆出的目标文件 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * @brief duplicate file + * @param: userinfo User Information + * @param: originId The original file ID that was restored + * @param: destinationId The cloned target file ID + * @param: origin The original file name of the recovered file + * @param: destination The cloned target file + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void RenameFile(const UserInfo_t& userinfo, const std::string &origin, @@ -376,13 +376,13 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 扩展文件 - * @param: userinfo是用户信息 - * @param: filename文件名 - * @param: newsize新的size - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Extension file + * @param: userinfo is the user information + * @param: filename File name + * @param: newsize New size + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void Extend(const std::string& filename, const UserInfo_t& userinfo, @@ -391,14 +391,14 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 删除文件 - * @param: userinfo是用户信息 - * @param: filename待删除的文件名 - * @param: deleteforce是否强制删除而不放入垃圾回收站 - * @param: id为文件id,默认值为0,如果用户不指定该值,不会传id到mds - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Delete files + * @param: userinfo is the user 
information + * @param: filename The file name to be deleted + * @param: Does deleteforce force deletion without placing it in the garbage bin + * @param: id is the file id, with a default value of 0. If the user does not specify this value, the id will not be passed to mds + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void DeleteFile(const std::string& filename, const UserInfo_t& userinfo, @@ -425,13 +425,13 @@ class MDSClientBase { brpc::Channel* channel); /** - * 变更owner - * @param: filename待变更的文件名 - * @param: newOwner新的owner信息 - * @param: userinfo执行此操作的user信息,只有root用户才能执行变更 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Change owner + * @param: filename The file name to be changed + * @param: newOwner New owner information + * @param: userinfo The user information for performing this operation, only the root user can perform changes + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void ChangeOwner(const std::string& filename, const std::string& newOwner, @@ -440,12 +440,12 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 枚举目录内容 - * @param: userinfo是用户信息 - * @param: dirpath是目录路径 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Enumerate directory contents + * @param: userinfo is the user information + * @param: dirpath is the directory path + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void Listdir(const std::string& dirpath, const UserInfo_t& userinfo, @@ -453,12 +453,12 @@ class MDSClientBase { brpc::Controller* cntl, brpc::Channel* channel); /** - * 获取chunkserverID信息 - * @param[in]: ip为当前client的监听地址 - * @param[in]: port为监听端口 - * @param[out]: response为该rpc的response,提供给外部处理 - * @param[in|out]: cntl既是入参,也是出参,返回RPC状态 - * @param[in]:channel是当前与mds建立的通道 + * Obtain chunkserverID information + * @param[in]: IP is the listening address of the current client + * @param[in]: port is the listening port + * @param[out]: response is the response of the rpc, provided for external processing + * @param[in|out]: cntl is both an input and output parameter, returning RPC status + * @param[in]: channel is the current channel established with MDS */ void GetChunkServerInfo(const std::string& ip, uint16_t port, @@ -467,11 +467,11 @@ class MDSClientBase { brpc::Channel* channel); /** - * 获取server上的所有chunkserver的id - * @param[in]: ip为当前server的地址 - * @param[out]: response是当前rpc调用的response,返回给外部处理 - * @param[in|out]: cntl既是入参也是出参 - * @param[in]: channel是当前与mds建立的通道 + * Obtain the IDs of all chunkservers on the server + * @param[in]: IP is the address of the current server + * @param[out]: response is the response of the current rpc call, returned to external processing + * @param[in|out]: cntl is both an input and output parameter + * @param[in]: channel is the current channel established with MDS */ void ListChunkServerInServer(const 
std::string& ip, ListChunkServerResponse* response, @@ -480,8 +480,8 @@ class MDSClientBase { private: /** - * 为不同的request填充user信息 - * @param: request是待填充的变量指针 + * Fill in user information for different requests + * @param: request is the pointer to the variable to be filled in */ template void FillUserInfo(T* request, const UserInfo_t& userinfo) { diff --git a/src/client/metacache.cpp b/src/client/metacache.cpp index f8b9098775..db161438c5 100644 --- a/src/client/metacache.cpp +++ b/src/client/metacache.cpp @@ -123,7 +123,7 @@ int MetaCache::GetLeader(LogicPoolID logicPoolId, << "logicpool id = " << logicPoolId << ", copyset id = " << copysetId; - // 重试失败,这时候需要向mds重新拉取最新的copyset信息了 + // The retry failed. At this point, it is necessary to retrieve the latest copyset information from mds again ret = UpdateCopysetInfoFromMDS(logicPoolId, copysetId); if (ret == 0) { continue; @@ -166,7 +166,7 @@ int MetaCache::UpdateLeaderInternal(LogicPoolID logicPoolId, ret = toupdateCopyset->UpdateLeaderInfo(leaderaddr); - // 如果更新失败,说明leader地址不在当前配置组中,从mds获取chunkserver的信息 + // If the update fails, it indicates that the leader address is not in the current configuration group. Obtain chunkserver information from MDS if (ret == -1 && !leaderaddr.IsEmpty()) { CopysetPeerInfo csInfo; ret = mdsclient_->GetChunkServerInfo(leaderaddr, &csInfo); @@ -201,9 +201,9 @@ int MetaCache::UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, return -1; } - // 更新chunkserverid到copyset映射关系 + // Update chunkserverid to copyset mapping relationship UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); return 0; @@ -224,9 +224,9 @@ void MetaCache::UpdateCopysetInfoIfMatchCurrentLeader( << ", copyset id = " << copysetId << ", current leader = " << leaderAddr.ToString(); - // 更新chunkserverid到copyset的映射关系 + // Update the mapping relationship between chunkserverid and copyset UpdateChunkserverCopysetInfo(logicPoolId, copysetInfos[0]); - // 更新logicpool和copysetid到copysetinfo的映射 + // Update the mapping of logicpool and copysetid to copysetinfo UpdateCopysetInfo(logicPoolId, copysetId, copysetInfos[0]); } } @@ -315,11 +315,11 @@ void MetaCache::SetChunkserverUnstable(ChunkServerID csid) { ChunkServerID leaderid; if (cpinfo->second.GetCurrentLeaderID(&leaderid)) { if (leaderid == csid) { - // 只设置leaderid为当前serverid的Lcopyset + // Set only the Lcopyset with leaderid as the current serverid cpinfo->second.SetLeaderUnstableFlag(); } } else { - // 当前copyset集群信息未知,直接设置LeaderUnStable + // The current copyset cluster information is unknown, set LeaderUnStable directly cpinfo->second.SetLeaderUnstableFlag(); } } @@ -336,13 +336,13 @@ void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid, const CopysetInfo& cpinfo) { ReadLockGuard rdlk(rwlock4CopysetInfo_); const auto key = CalcLogicPoolCopysetID(lpid, cpinfo.cpid_); - // 先获取原来的chunkserver到copyset映射 + // First, obtain the original chunkserver to copyset mapping auto previouscpinfo = lpcsid2CopsetInfoMap_.find(key); if (previouscpinfo != lpcsid2CopsetInfoMap_.end()) { std::vector newID; std::vector changedID; - // 先判断当前copyset有没有变更chunkserverid + // Determine if the current copyset has changed the chunkserverid for (auto iter : previouscpinfo->second.csinfos_) { changedID.push_back(iter.peerID); } @@ -357,7 +357,7 @@ void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid, } } - // 删除变更的copyset信息 + // Delete changed 
copyset information for (auto chunkserverid : changedID) { { WriteLockGuard wrlk(rwlock4CSCopysetIDMap_); @@ -368,7 +368,7 @@ void MetaCache::UpdateChunkserverCopysetInfo(LogicPoolID lpid, } } - // 更新新的copyset信息到chunkserver + // Update the new copyset information to the chunkserver for (auto chunkserverid : newID) { WriteLockGuard wrlk(rwlock4CSCopysetIDMap_); chunkserverCopysetIDMap_[chunkserverid].emplace(lpid, cpinfo.cpid_); diff --git a/src/client/metacache.h b/src/client/metacache.h index da1f56efde..2fe173a058 100644 --- a/src/client/metacache.h +++ b/src/client/metacache.h @@ -60,24 +60,24 @@ class MetaCache { virtual ~MetaCache() = default; /** - * 初始化函数 - * @param: metacacheopt为当前metacache的配置option信息 - * @param: mdsclient为与mds通信的指针。 - * 为什么这里需要把mdsclient传进来? - * 因为首先metacache充当的角色就是对于MDS一侧的信息缓存 - * 所以对于底层想使用metacache的copyset client或者chunk closure - * 来说,他只需要知道metacache就可以了,不需要再去向mds查询信息, - * 在copyset client或者chunk closure发送IO失败之后会重新获取leader - * 然后再重试,如果leader获取不成功,需要向mds一侧查询当前copyset的最新信息, - * 这里将查询mds封装在内部了,这样copyset client和chunk closure就不感知mds了 + * Initialization function + * @param: metacacheopt is the configuration option information for the current metacache + * @param: mdsclient is the pointer used to communicate with mds. + * Why does mdsclient need to be passed in here? + * Because the role of the metacache is, first of all, to cache information from the MDS side, + * the lower-level components that want to use the metacache, such as the copyset client or chunk closure, + * only need to know about the metacache and do not need to query MDS for the information themselves. + * After the copyset client or chunk closure fails to send IO, it fetches the leader again and retries; + * if fetching the leader fails, the latest information of the current copyset has to be queried from the MDS side. + * That MDS query is encapsulated here, so the copyset client and chunk closure are not aware of MDS at all */ void Init(const MetaCacheOption &metaCacheOpt, MDSClient *mdsclient); /** - * 通过chunk index获取chunkid信息 - * @param: chunkidx以index查询chunk对应的id信息 - * @param: chunkinfo是出参,存储chunk的版本信息 - * @param: 成功返回OK, 否则返回UNKNOWN_ERROR + * Obtain chunkid information through the chunk index + * @param: chunkidx queries the id information corresponding to the chunk by index + * @param: chunkinfo is an output parameter that stores the version information of the chunk + * @param: Returns OK on success, otherwise returns UNKNOWN_ERROR */ virtual MetaCacheErrorType GetChunkInfoByIndex(ChunkIndex chunkidx, ChunkIDInfo_t *chunkinfo); @@ -89,63 +89,63 @@ class MetaCache { const ChunkIDInfo &chunkinfo); /** - * sender发送数据的时候需要知道对应的leader然后发送给对应的chunkserver - * 如果get不到的时候,外围设置refresh为true,然后向chunkserver端拉取最新的 - * server信息,然后更新metacache。 - * 如果当前copyset的leaderMayChange置位的时候,即使refresh为false,也需要 - * 先去拉取新的leader信息,才能继续下发IO. - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @param: serverId对应chunkserver的id信息,是出参 - * @param: serverAddr为serverid对应的ip信息 - * @param: refresh,如果get不到的时候,外围设置refresh为true, - * 然后向chunkserver端拉取最新的 - * @param: fm用于统计metric - * @param: 成功返回0, 否则返回-1 + * When the sender sends data, it needs to know the corresponding leader and send it to the corresponding chunkserver. + * If it cannot retrieve the leader, and the external setting has "refresh" set to true, it will then fetch the latest + * server information from the chunkserver side and update the metacache.
+ * If the "leaderMayChange" flag of the current copyset is set, even if "refresh" is set to false, + * it is still necessary to fetch the new leader information before continuing with IO operations. + * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @param: The serverId corresponds to the ID information of the chunkserver, which is the output parameter + * @param: serverAddr is the IP information corresponding to serverid + * @param: refresh. If it cannot be obtained, set the peripheral refresh to true, + * Then pull the latest data from the chunkserver end + * @param: fm for statistical metrics + * @param: Successfully returns 0, otherwise returns -1 */ virtual int GetLeader(LogicPoolID logicPoolId, CopysetID copysetId, ChunkServerID *serverId, butil::EndPoint *serverAddr, bool refresh = false, FileMetric *fm = nullptr); /** - * 更新某个copyset的leader信息 - * @param logicPoolId 逻辑池id - * @param copysetId 复制组id - * @param leaderAddr leader地址 - * @return: 成功返回0, 否则返回-1 + * Update the leader information of a copyset + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param leaderAddr leader address + * @return: Successfully returns 0, otherwise returns -1 */ virtual int UpdateLeader(LogicPoolID logicPoolId, CopysetID copysetId, const butil::EndPoint &leaderAddr); /** - * 更新copyset数据信息,包含serverlist - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @param: csinfo是要更新的copyset info + * Update copyset data information, including serverlist + * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @param: csinfo is the copyset info to be updated */ virtual void UpdateCopysetInfo(LogicPoolID logicPoolId, CopysetID copysetId, const CopysetInfo &csinfo); /** - * 通过chunk id更新chunkid信息 - * @param: cid为chunkid - * @param: cidinfo为当前chunk对应的id信息 + * Update chunk information through chunk id + * @param: cid is chunkid + * @param: cininfo is the ID information corresponding to the current chunk */ virtual void UpdateChunkInfoByID(ChunkID cid, const ChunkIDInfo &cidinfo); /** - * 获取当前copyset的server list信息 - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @return: 当前copyset的copysetinfo信息 + * Obtain the server list information for the current copyset + * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @return: The copysetinfo information of the current copyset */ virtual CopysetInfo GetServerList(LogicPoolID logicPoolId, CopysetID copysetId); /** - * 将ID转化为cache的key - * @param: lpid逻辑池id - * @param: cpid是copysetid - * @return: 为当前的key + * Convert ID to key for cache + * @param: lpid Logical Pool ID + * @param: cpid is copysetid + * @return: is the current key */ static LogicPoolCopysetID CalcLogicPoolCopysetID(LogicPoolID logicPoolId, CopysetID copysetId) { @@ -154,30 +154,30 @@ class MetaCache { } /** - * @brief: 标记整个server上的所有chunkserver为unstable状态 + * @brief: Mark all chunkservers on the entire server as unstable * - * @param: serverIp server的ip地址 - * @return: 0 设置成功 / -1 设置失败 + * @param: serverIp The IP address of the server + * @return: 0 set successfully/-1 set failed */ virtual int SetServerUnstable(const std::string &endPoint); /** - * 如果leader所在的chunkserver出现问题了,导致RPC失败。这时候这个 - * chunkserver上的其他leader copyset也会存在同样的问题,所以需要 - * 通知当前chunkserver上的leader copyset. 
主要是通过设置这个copyset - * 的leaderMayChange标志,当该copyset的再次下发IO的时候会查看这个 - * 状态,当这个标志位置位的时候,IO下发需要先进行leader refresh, - * 如果leaderrefresh成功,leaderMayChange会被reset。 - * SetChunkserverUnstable就会遍历当前chunkserver上的所有copyset - * 并设置这个chunkserver的leader copyset的leaderMayChange标志。 - * @param: csid是当前不稳定的chunkserver ID + * If the chunkserver where the leader is located encounters a problem, leading to RPC failures, + * then other leader copysets on this chunkserver will also face the same issue. + * Therefore, it is necessary to notify the leader copysets on the current chunkserver. This is primarily done by setting the "leaderMayChange" flag for these copysets. + * When IO is issued again for a copyset with this flag set, the system will check this status. When this flag is set, + * IO issuance will first perform a leader refresh. + * If the leader refresh is successful, the "leaderMayChange" flag will be reset. + * The "SetChunkserverUnstable" operation will iterate through all the copysets on the current chunkserver and + * set the "leaderMayChange" flag for the leader copysets of that chunkserver. + * @param: csid is the currently unstable chunkserver ID */ virtual void SetChunkserverUnstable(ChunkServerID csid); /** - * 向map中添加对应chunkserver的copyset信息 - * @param: csid为当前chunkserverid - * @param: cpid为当前copyset的id信息 + * Add copyset information for the corresponding chunkserver to the map + * @param: csid is the current chunkserverid + * @param: cpid is the ID information of the current copyset */ virtual void AddCopysetIDInfo(ChunkServerID csid, const CopysetIDInfo &cpid); @@ -207,14 +207,14 @@ class MetaCache { } /** - * 获取对应的copyset的LeaderMayChange标志 + * Get the LeaderMayChange flag of the corresponding copyset */ virtual bool IsLeaderMayChange(LogicPoolID logicpoolId, CopysetID copysetId); /** - * 测试使用 - * 获取copysetinfo信息 + * Test Usage + * Obtain copysetinfo information */ virtual CopysetInfo GetCopysetinfo(LogicPoolID lpid, CopysetID csid); @@ -235,29 +235,29 @@ class MetaCache { private: /** - * @brief 从mds更新copyset复制组信息 - * @param logicPoolId 逻辑池id - * @param copysetId 复制组id - * @return 0 成功 / -1 失败 + * @brief Update copyset replication group information from mds + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @return 0 successful/-1 failed */ int UpdateCopysetInfoFromMDS(LogicPoolID logicPoolId, CopysetID copysetId); /** - * 更新copyset的leader信息 - * @param[in]: logicPoolId逻辑池信息 - * @param[in]: copysetId复制组信息 - * @param[out]: toupdateCopyset为metacache中待更新的copyset信息指针 + * Update the leader information of the copyset + * @param[in]: logicPoolId Logical Pool Information + * @param[in]: copysetId Copy group information + * @param[out]: toupdateCopyset is the pointer to the copyset information to be updated in the metacache */ int UpdateLeaderInternal(LogicPoolID logicPoolId, CopysetID copysetId, CopysetInfo *toupdateCopyset, FileMetric *fm = nullptr); /** - * 从mds拉去复制组信息,如果当前leader在复制组中 - * 则更新本地缓存,反之则不更新 - * @param: logicPoolId 逻辑池id - * @param: copysetId 复制组id - * @param: leaderAddr 当前的leader address + * Pull replication group information from MDS, if the current leader is in the replication group + * Update local cache, otherwise do not update + * @param: logicPoolId Logical Pool ID + * @param: copysetId Copy group ID + * @param: leaderAddr The current leader address */ void UpdateCopysetInfoIfMatchCurrentLeader(LogicPoolID logicPoolId, CopysetID copysetId, @@ -267,36 +267,36 @@ class MetaCache { MDSClient *mdsclient_; MetaCacheOption metacacheopt_; - // 
chunkindex到chunkidinfo的映射表 + // Mapping table from chunkindex to chunkidinfo CURVE_CACHELINE_ALIGNMENT ChunkIndexInfoMap chunkindex2idMap_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4Segments_; CURVE_CACHELINE_ALIGNMENT std::unordered_map segments_; // NOLINT - // logicalpoolid和copysetid到copysetinfo的映射表 + // Mapping table for logicalpoolid and copysetid to copysetinfo CURVE_CACHELINE_ALIGNMENT CopysetInfoMap lpcsid2CopsetInfoMap_; - // chunkid到chunkidinfo的映射表 + // chunkid to chunkidinfo mapping table CURVE_CACHELINE_ALIGNMENT ChunkInfoMap chunkid2chunkInfoMap_; - // 三个读写锁分别保护上述三个映射表 + // Three read and write locks protect each of the three mapping tables mentioned above CURVE_CACHELINE_ALIGNMENT RWLock rwlock4chunkInfoMap_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4ChunkInfo_; CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CopysetInfo_; - // chunkserverCopysetIDMap_存放当前chunkserver到copyset的映射 - // 当rpc closure设置SetChunkserverUnstable时,会设置该chunkserver - // 的所有copyset处于leaderMayChange状态,后续copyset需要判断该值来看 - // 是否需要刷新leader + // chunkserverCopysetIDMap_ stores the mapping of the current chunkserver to copysets. + // When an RPC closure sets SetChunkserverUnstable, + // it sets all the copysets of that chunkserver to the leaderMayChange state. + // Subsequent copyset operations will check this value to determine whether a leader refresh is needed. - // chunkserverid到copyset的映射 + // Mapping chunkserverid to copyset std::unordered_map> chunkserverCopysetIDMap_; // NOLINT - // 读写锁保护unStableCSMap + // Read write lock protection unstableCSMap CURVE_CACHELINE_ALIGNMENT RWLock rwlock4CSCopysetIDMap_; - // 当前文件信息 + // Current file information FInfo fileInfo_; // epoch info diff --git a/src/client/metacache_struct.h b/src/client/metacache_struct.h index 8a308f722f..639f7ddd5f 100644 --- a/src/client/metacache_struct.h +++ b/src/client/metacache_struct.h @@ -43,14 +43,14 @@ using curve::common::ReadLockGuard; using curve::common::SpinLock; using curve::common::WriteLockGuard; -// copyset内的chunkserver节点的基本信息 -// 包含当前chunkserver的id信息,以及chunkserver的地址信息 +// Basic information of chunkserver nodes in the copyset +// Contains the ID information of the current chunkserver and the address information of the chunkserver template struct CURVE_CACHELINE_ALIGNMENT CopysetPeerInfo { - // 当前chunkserver节点的ID + // The ID of the current chunkserver node T peerID = 0; - // 当前chunkserver节点的内部地址 + // The internal address of the current chunkserver node PeerAddr internalAddr; - // 当前chunkserver节点的外部地址 + // The external address of the current chunkserver node PeerAddr externalAddr; CopysetPeerInfo() = default; @@ -85,16 +85,16 @@ inline std::ostream &operator<<(std::ostream &os, const CopysetPeerInfo &c) { // copyset's informations inclucing peer and leader information template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { - // leader存在变更可能标志位 + // Possible flag bits for leader changes bool leaderMayChange_ = false; - // 当前copyset的节点信息 + // Node information of the current copyset std::vector> csinfos_; - // leader在本copyset信息中的索引,用于后面避免重复尝试同一个leader + // The index of the leader in this copyset information is used to avoid repeated attempts at the same leader in the future int16_t leaderindex_ = -1; - // 当前copyset的id信息 + // The ID information of the current copyset CopysetID cpid_ = 0; LogicPoolID lpid_ = 0; - // 用于保护对copyset信息的修改 + // Used to protect modifications to copyset information SpinLock spinlock_; CopysetInfo() = default; @@ -126,7 +126,7 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { } /** - * 获取当前leader的索引 + * Get 
the index of the current leader */ int16_t GetCurrentLeaderIndex() const { return leaderindex_; } @@ -144,8 +144,8 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { } /** - * 更新leaderindex,如果leader不在当前配置组中,则返回-1 - * @param: addr为新的leader的地址信息 + * Update the leaderindex, if the leader is not in the current configuration group, return -1 + * @param: addr is the address information of the new leader */ int UpdateLeaderInfo(const PeerAddr &addr, CopysetPeerInfo csInfo = CopysetPeerInfo()) { @@ -163,7 +163,7 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { tempindex++; } - // 新的addr不在当前copyset内,如果csInfo不为空,那么将其插入copyset + // The new addr is not within the current copyset. If csInfo is not empty, insert it into the copyset if (!exists && !csInfo.IsEmpty()) { csinfos_.push_back(csInfo); } else if (exists == false) { @@ -183,7 +183,7 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { * @param[out]: ep */ int GetLeaderInfo(T *peerid, EndPoint *ep) { - // 第一次获取leader,如果当前leader信息没有确定,返回-1,由外部主动发起更新leader + // For the first time obtaining the leader, if the current leader information is not determined, return -1, and the external initiative will be initiated to update the leader if (leaderindex_ < 0 || leaderindex_ >= static_cast(csinfos_.size())) { LOG(INFO) << "GetLeaderInfo pool " << lpid_ << ", copyset " << cpid_ @@ -203,8 +203,8 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { } /** - * 添加copyset的peerinfo - * @param: csinfo为待添加的peer信息 + * Add peerinfo for copyset + * @param: csinfo is the peer information to be added */ void AddCopysetPeerInfo(const CopysetPeerInfo &csinfo) { spinlock_.Lock(); @@ -213,19 +213,19 @@ template struct CURVE_CACHELINE_ALIGNMENT CopysetInfo { } /** - * 当前CopysetInfo是否合法 + * Is the current CopysetInfo legal */ bool IsValid() const { return !csinfos_.empty(); } /** - * 更新leaderindex + * Update leaderindex */ void UpdateLeaderIndex(int index) { leaderindex_ = index; } /** - * 当前copyset是否存在对应的chunkserver address - * @param: addr需要检测的chunkserver - * @return: true存在;false不存在 + * Does the current copyset have a corresponding chunkserver address + * @param: addr Chunkserver to be detected + * @return: true exists; False does not exist */ bool HasPeerInCopyset(const PeerAddr &addr) const { for (const auto &peer : csinfos_) { diff --git a/src/client/request_closure.h b/src/client/request_closure.h index 2fb0272662..eed5b41400 100644 --- a/src/client/request_closure.h +++ b/src/client/request_closure.h @@ -74,35 +74,35 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure } /** - * @brief 获取当前closure属于哪个request + * @brief to obtain which request the current closure belongs to */ virtual RequestContext* GetReqCtx() { return reqCtx_; } /** - * @brief 获取当前request属于哪个iotracker + * @brief: Obtain which iotracker the current request belongs to */ virtual IOTracker* GetIOTracker() { return tracker_; } /** - * @brief 设置当前属于哪一个iotracker + * @brief Set which iotracker currently belongs to */ void SetIOTracker(IOTracker* ioTracker) { tracker_ = ioTracker; } /** - * @brief 设置所属的iomanager + * @brief Set the iomanager to which it belongs */ void SetIOManager(IOManager* ioManager) { ioManager_ = ioManager; } /** - * @brief 设置当前closure重试次数 + * @brief Set the current closure retry count */ void IncremRetriedTimes() { retryTimes_++; @@ -113,35 +113,35 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure } /** - * 设置metric + * Set metric */ void SetFileMetric(FileMetric* fm) { metric_ = fm; } /** - * 获取metric指针 + * Get metric pointer */ FileMetric* GetMetric() const { 
return metric_; } /** - * 获取下一次rpc超时时间, rpc超时时间实现了指数退避的策略 + * Obtain the next RPC timeout, which implements an exponential backoff strategy */ uint64_t GetNextTimeoutMS() const { return nextTimeoutMS_; } /** - * 设置下次重试超时时间 + * Set the next retry timeout time */ void SetNextTimeOutMS(uint64_t timeout) { nextTimeoutMS_ = timeout; } /** - * 设置当前的IO为悬挂IO + * Set the current IO as suspended IO */ void SetSuspendRPCFlag() { suspendRPC_ = true; @@ -152,31 +152,31 @@ class CURVE_CACHELINE_ALIGNMENT RequestClosure } private: - // suspend io标志 + // suspend io logo bool suspendRPC_ = false; // whether own inflight count bool ownInflight_ = false; - // 当前request的错误码 + // The error code of the current request int errcode_ = -1; - // 当前request的tracker信息 + // Tracker information for the current request IOTracker* tracker_ = nullptr; - // closure的request信息 + // Request information for closures RequestContext* reqCtx_ = nullptr; - // metric信息 + // metric Information FileMetric* metric_ = nullptr; - // 重试次数 + // Number of retries uint64_t retryTimes_ = 0; - // 当前closure属于的iomanager + // The iomanager to which the current closure belongs IOManager* ioManager_ = nullptr; - // 下一次rpc超时时间 + // Next RPC timeout uint64_t nextTimeoutMS_ = 0; }; diff --git a/src/client/request_context.h b/src/client/request_context.h index cfca2c60d5..47915ec745 100644 --- a/src/client/request_context.h +++ b/src/client/request_context.h @@ -73,10 +73,10 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext { done_ = nullptr; } - // chunk的ID信息,sender在发送rpc的时候需要附带其ID信息 + // The ID information of the chunk, which the sender needs to include when sending rpc ChunkIDInfo idinfo_; - // 用户IO被拆分之后,其小IO有自己的offset和length + // After user IO is split, its small IO has its own offset and length off_t offset_ = 0; OpType optype_ = OpType::UNKNOWN; size_t rawlength_ = 0; @@ -91,28 +91,28 @@ struct CURVE_CACHELINE_ALIGNMENT RequestContext { // write data of current request butil::IOBuf writeData_; - // 因为RPC都是异步发送,因此在一个Request结束时,RPC回调调用当前的done - // 来告知当前的request结束了 + // Because RPC is sent asynchronously, at the end of a request, the RPC callback calls the current done + // To inform you that the current request is over RequestClosure* done_ = nullptr; // file id uint64_t fileId_; // file epoch uint64_t epoch_; - // request的版本信息 + // Version information of request uint64_t seq_ = 0; - // 这个对应的GetChunkInfo的出参 + // The output parameter of this corresponding GetChunkInfo ChunkInfoDetail* chunkinfodetail_ = nullptr; - // clone chunk请求需要携带源chunk的location及所需要创建的chunk的大小 + // The clone chunk request needs to carry the location of the source chunk and the size of the chunk that needs to be created uint32_t chunksize_ = 0; std::string location_; RequestSourceInfo sourceInfo_; - // create clone chunk时候用于修改chunk的correctedSn + // CorrectedSn used to modify a chunk when creating a clone chunk uint64_t correctedSeq_ = 0; - // 当前request context id + // Current request context id uint64_t id_ = 0; static RequestContext* NewInitedRequestContext() { diff --git a/src/client/request_scheduler.cpp b/src/client/request_scheduler.cpp index e723126235..538fe0cb72 100644 --- a/src/client/request_scheduler.cpp +++ b/src/client/request_scheduler.cpp @@ -89,7 +89,7 @@ int RequestScheduler::Fini() { int RequestScheduler::ScheduleRequest( const std::vector& requests) { if (running_.load(std::memory_order_acquire)) { - /* TODO(wudemiao): 后期考虑 qos */ + /* TODO(wudemiao): Consider QoS in the later stage */ for (auto it : requests) { // skip the fake request if 
(!it->idinfo_.chunkExist) { @@ -126,14 +126,14 @@ int RequestScheduler::ReSchedule(RequestContext *request) { } void RequestScheduler::WakeupBlockQueueAtExit() { - // 在scheduler退出的时候要把队列的内容清空, 通知copyset client - // 当前操作是退出状态,copyset client会针对inflight RPC做响应处理 - // 正常情况下队列内容一定会在Fini调用结束之后全部清空 - // 但是在session刷新失败的时候,scheduler无法继续下发 - // RPC请求,所以需要设置blockingQueue_标志,告知scheduler - // 把队列里内容统统扔到copyset client,因为在session - // 续约失败后copyset client会将IO全部失败返回,scheduler - // 模块不需要处理具体RPC请求,由copyset client负责。 + // When the scheduler exits, the contents of the queue must be cleared and the copyset client notified + // that the current operation is in the exit state; the copyset client will handle the inflight RPCs accordingly. + // Under normal circumstances the queue is guaranteed to be fully drained after the Fini call completes, + // but when the session refresh fails the scheduler can no longer issue + // RPC requests, so the blockingQueue_ flag needs to be set to tell the scheduler + // to hand everything in the queue over to the copyset client, because after the session + // renewal fails the copyset client will fail and return all IO; the scheduler + // module does not need to handle the individual RPC requests, the copyset client is responsible for them. client_.ResetExitFlag(); blockingQueue_ = false; std::atomic_thread_fence(std::memory_order_acquire); @@ -151,8 +151,8 @@ void RequestScheduler::Process() { ProcessOne(req); } else { /** - * 一旦遇到stop item,所有线程都可以退出,因为此时 - * queue里面所有的request都被处理完了 + * Once a stop item is encountered, all threads can exit, because at this point + * all requests in the queue have been processed */ stop_.store(true, std::memory_order_release); } @@ -197,7 +197,7 @@ void RequestScheduler::ProcessOne(RequestContext* ctx) { guard.release()); break; default: - /* TODO(wudemiao) 后期整个链路错误发统一了在处理 */ + /* TODO(wudemiao): unify the error handling of the whole IO path later */ ctx->done_->SetFailed(-1); LOG(ERROR) << "unknown op type: OpType::UNKNOWN"; } diff --git a/src/client/request_scheduler.h b/src/client/request_scheduler.h index 752f72bcb0..c76831b3d1 100644 --- a/src/client/request_scheduler.h +++ b/src/client/request_scheduler.h @@ -43,8 +43,8 @@ using curve::common::Uncopyable; struct RequestContext; /** - * 请求调度器,上层拆分的I/O会交给Scheduler的线程池 - * 分发到具体的ChunkServer,后期QoS也会放在这里处理 + * Request scheduler: the I/O split by the upper layer is handed over to the Scheduler's thread pool + * and dispatched to the specific ChunkServers; QoS will also be handled here in the future */ class RequestScheduler : public Uncopyable { public: @@ -56,57 +56,57 @@ class RequestScheduler : public Uncopyable { virtual ~RequestScheduler(); /** - * 初始化 - * @param: reqSchdulerOpt为scheduler的配置选项 - * @param: metacache为meta信息 - * @param: filematric为文件的metric信息 + * Initialize + * @param: reqSchdulerOpt is the configuration option for the scheduler + * @param: metacache is the meta information + * @param: filematric is the metric information of the file */ virtual int Init(const RequestScheduleOption& reqSchdulerOpt, MetaCache *metaCache, FileMetric* fileMetric = nullptr); /** - * 启动Scheduler的线程池开始处理request - * 启动之后才能push request,除此之外,只有当 - * queue里面的任务都被处理完了,才会Scheduler - * 的 thread pool里面的所有线程都退出 - * @return 0成功,-1失败 + * Start the Scheduler's thread pool to begin processing requests. + * Requests can only be pushed after starting.
Furthermore, only when + * all tasks in the queue have been processed will all threads in the Scheduler's + * thread pool exit. + * @return 0 for success, -1 for failure */ virtual int Run(); /** - * Stop Scheduler,一旦调用了Fini,那么 - * 此Scheduler不再接收新的request - * @return 0成功,-1失败 + * Stop Scheduler, once Fini is called, then + * This scheduler no longer receives new requests + * @return 0 succeeded, -1 failed */ virtual int Fini(); /** - * 将request push到Scheduler处理 - * @param requests:请求列表 - * @return 0成功,-1失败 + * Push the request to the scheduler for processing + * @param requests: Request List + * @return 0 succeeded, -1 failed */ virtual int ScheduleRequest(const std::vector& requests); /** - * 将request push到Scheduler处理 - * @param request:一个request - * @return 0成功,-1失败 + * Push the request to the scheduler for processing + * @param request: A request + * @return 0 succeeded, -1 failed */ virtual int ScheduleRequest(RequestContext *request); /** - * 对于需要重新入队的RPC将其放在头部 + * For RPC that need to be re queued, place them at the top */ virtual int ReSchedule(RequestContext *request); /** - * 关闭scheduler之前如果队列在sessionnotvalid睡眠就将其唤醒 + * Before closing the scheduler, if the queue is in sessionnotvalid, wake it up */ virtual void WakeupBlockQueueAtExit(); /** - * 当LeaseExecutor续约失败的时候,调用LeaseTimeoutDisableIO - * 后续的IO调度会被阻塞 + * When LeaseExecutor renewal fails, call LeaseTimeoutDisableIO + * Subsequent IO scheduling will be blocked */ void LeaseTimeoutBlockIO() { std::unique_lock lk(leaseRefreshmtx_); @@ -115,8 +115,8 @@ class RequestScheduler : public Uncopyable { } /** - * 当lease又续约成功的时候,LeaseExecutor调用该接口恢复IO, - * IO调度被恢复 + * When the lease is successfully renewed, the LeaseExecutor calls the interface to restore IO, + * IO scheduling restored */ void ResumeIO() { std::unique_lock lk(leaseRefreshmtx_); @@ -126,7 +126,7 @@ class RequestScheduler : public Uncopyable { } /** - * 测试使用,获取队列 + * For testing purposes, get the queue. 
*/ BoundedBlockingDeque>* GetQueue() { return &queue_; @@ -134,14 +134,14 @@ class RequestScheduler : public Uncopyable { private: /** - * Thread pool的运行函数,会从queue中取request进行处理 + * The run function of the Thread pool will retrieve the request from the queue for processing */ void Process(); void ProcessOne(RequestContext* ctx); void WaitValidSession() { - // lease续约失败的时候需要阻塞IO直到续约成功 + // When the lease renewal fails, IO needs to be blocked until the renewal is successful if (blockIO_.load(std::memory_order_acquire) && blockingQueue_) { std::unique_lock lk(leaseRefreshmtx_); leaseRefreshcv_.wait(lk, [&]() -> bool { @@ -151,28 +151,28 @@ class RequestScheduler : public Uncopyable { } private: - // 线程池和queue容量的配置参数 + // Configuration parameters for thread pool and queue capacity RequestScheduleOption reqschopt_; - // 存放 request 的队列 + // Queue for storing request BoundedBlockingDeque> queue_; - // 处理 request 的线程池 + // Thread pool for processing request ThreadPool threadPool_; - // Scheduler 运行标记,只有运行了,才接收 request + // The running flag of the Scheduler, it only accepts requests when it's running std::atomic running_; - // stop thread pool 标记,当调用 Scheduler Fini - // 之后且 queue 里面的 request 都处理完了,就可以 - // 让所有处理线程退出了 + // stop thread pool flag, when calling Scheduler Fini + // After processing all the requests in the queue, you can proceed + // Let all processing threads exit std::atomic stop_; - // 访问复制组Chunk的客户端 + // Client accessing replication group Chunk CopysetClient client_; - // 续约失败,卡住IO + // Renewal failed, IO stuck std::atomic blockIO_; - // 此锁与LeaseRefreshcv_条件变量配合使用 - // 在leasee续约失败的时候,所有新下发的IO被阻塞直到续约成功 + // This lock is associated with LeaseRefreshcv_ Using Conditional Variables Together + // When lease renewal fails, all newly issued IO is blocked until the renewal is successful std::mutex leaseRefreshmtx_; - // 条件变量,用于唤醒和hang IO + // Conditional variables for wake-up and hang IO std::condition_variable leaseRefreshcv_; - // 阻塞队列 + // Blocking queue bool blockingQueue_; }; diff --git a/src/client/request_sender.h b/src/client/request_sender.h index f288160267..a53c777270 100644 --- a/src/client/request_sender.h +++ b/src/client/request_sender.h @@ -39,8 +39,8 @@ namespace curve { namespace client { /** - * 一个RequestSender负责管理一个ChunkServer的所有 - * connection,目前一个ChunkServer仅有一个connection + * A RequestSender is responsible for managing all aspects of a ChunkServer + * Connection, currently there is only one connection for a ChunkServer */ class RequestSender { public: @@ -54,13 +54,13 @@ class RequestSender { int Init(const IOSenderOption& ioSenderOpt); /** - * 读Chunk - * @param idinfo为chunk相关的id信息 - * @param sn:文件版本号 - * @param offset:读的偏移 - * @param length:读的长度 - * @param sourceInfo 数据源信息 - * @param done:上一层异步回调的closure + * Reading Chunk + * @param IDInfo is the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param sourceInfo Data source information + * @param done: closure of asynchronous callback on the previous layer */ int ReadChunk(const ChunkIDInfo& idinfo, uint64_t sn, @@ -70,16 +70,16 @@ class RequestSender { ClientClosure *done); /** - * 写Chunk - * @param idinfo为chunk相关的id信息 + * Write Chunk + * @param IDInfo is the ID information related to chunk * @param fileId: file id * @param epoch: file epoch - * @param sn:文件版本号 - * @param data 要写入的数据 - *@param offset:写的偏移 - * @param length:写的长度 - * @param sourceInfo 数据源信息 - * @param done:上一层异步回调的closure + * @param sn: File version number + * @param data 
The data to be written + * @param offset: write offset + * @param length: The length written + * @param sourceInfo Data source information + * @param done: closure of asynchronous callback on the previous layer */ int WriteChunk(const ChunkIDInfo& idinfo, uint64_t fileId, @@ -92,12 +92,12 @@ class RequestSender { ClientClosure *done); /** - * 读Chunk快照文件 - * @param idinfo为chunk相关的id信息 - * @param sn:文件版本号 - * @param offset:读的偏移 - * @param length:读的长度 - * @param done:上一层异步回调的closure + * Reading Chunk snapshot files + * @param IDInfo is the ID information related to chunk + * @param sn: File version number + * @param offset: Read offset + * @param length: Read length + * @param done: closure of asynchronous callback on the previous layer */ int ReadChunkSnapshot(const ChunkIDInfo& idinfo, uint64_t sn, @@ -106,41 +106,41 @@ class RequestSender { ClientClosure *done); /** - * 删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param idinfo为chunk相关的id信息 - * @param correctedSn:chunk需要修正的版本号 - * @param done:上一层异步回调的closure + * Delete snapshots generated during this dump or left over from history + * If no snapshot is generated during the dump process, modify the correctedSn of the chunk + * @param IDInfo is the ID information related to chunk + * @param correctedSn: Chunk The version number that needs to be corrected + * @param done: closure of asynchronous callback on the previous layer */ int DeleteChunkSnapshotOrCorrectSn(const ChunkIDInfo& idinfo, uint64_t correctedSn, ClientClosure *done); /** - * 获取chunk文件的信息 - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure - * @param retriedTimes:已经重试了几次 + * Obtain information about chunk files + * @param IDInfo is the ID information related to chunk + * @param done: closure of asynchronous callback on the previous layer + * @param retriedTimes: Number of retries */ int GetChunkInfo(const ChunkIDInfo& idinfo, ClientClosure *done); /** - * @brief lazy 创建clone chunk + * @brief lazy Create clone chunk * @detail - * - location的格式定义为 A@B的形式。 - * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs + * - The format definition of a location is A@B The form of. 
+ * - If the source data is on s3, the location format is uri@s3, where uri is the address of the actual chunk object; + * - If the source data is on curvefs, the location format is /filename/chunkindex@cs * - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure - * @param:location 数据源的url - * @param:sn chunk的序列号 - * @param:correntSn CreateCloneChunk时候用于修改chunk的correctedSn - * @param:chunkSize chunk的大小 - * @param retriedTimes:已经重试了几次 + * @param idinfo is the ID information related to the chunk + * @param done: closure of the asynchronous callback of the upper layer + * @param: location, URL of the data source + * @param: sn, the chunk's sequence number + * @param: correntSn, used to modify the chunk's correctedSn when creating the clone chunk + * @param: chunkSize, size of the chunk + * @param retriedTimes: Number of retries so far * - * @return 错误码 + * @return error code */ int CreateCloneChunk(const ChunkIDInfo& idinfo, ClientClosure *done, @@ -150,22 +150,22 @@ uint64_t chunkSize); /** - * @brief 实际恢复chunk数据 - * @param idinfo为chunk相关的id信息 - * @param done:上一层异步回调的closure - * @param:offset 偏移 - * @param:len 长度 - * @param retriedTimes:已经重试了几次 + * @brief Actually recover the chunk data + * @param idinfo is the ID information related to the chunk + * @param done: closure of the asynchronous callback of the upper layer + * @param: offset: offset + * @param: len: length + * @param retriedTimes: Number of retries so far * - * @return 错误码 + * @return error code */ int RecoverChunk(const ChunkIDInfo& idinfo, ClientClosure* done, uint64_t offset, uint64_t len); /** - * 重置和Chunk Server的链接 - * @param chunkServerId:Chunk Server唯一标识 - * @param serverEndPoint:Chunk Server - * @return 0成功,-1失败 + * Reset the connection to the Chunk Server + * @param chunkServerId: unique identifier of the Chunk Server + * @param serverEndPoint: Chunk Server + * @return 0 succeeded, -1 failed */ int ResetSender(ChunkServerID chunkServerId, butil::EndPoint serverEndPoint); @@ -181,13 +181,13 @@ google::protobuf::Message* rpcResponse) const; private: - // Rpc stub配置 + // Rpc stub configuration IOSenderOption iosenderopt_; - // ChunkServer 的唯一标识 id + // The unique ID of the ChunkServer ChunkServerID chunkServerId_; - // ChunkServer 的地址 + // Address of the ChunkServer butil::EndPoint serverEndPoint_; - brpc::Channel channel_; /* TODO(wudemiao): 后期会维护多个 channel */ + brpc::Channel channel_; /* TODO(wudemiao): Multiple channels will be maintained in the future */ }; } // namespace client diff --git a/src/client/request_sender_manager.cpp b/src/client/request_sender_manager.cpp index a5c77a793f..2407e373a3 100644 --- a/src/client/request_sender_manager.cpp +++ b/src/client/request_sender_manager.cpp @@ -66,7 +66,7 @@ void RequestSenderManager::ResetSenderIfNotHealth(const ChunkServerID& csId) { return; } - // 检查是否健康 + // Check whether it is healthy if (iter->second->IsSocketHealth()) { return; } diff --git a/src/client/request_sender_manager.h b/src/client/request_sender_manager.h index 530d8c1c82..e518fed63b 100644 --- a/src/client/request_sender_manager.h +++ b/src/client/request_sender_manager.h @@ -38,8 +38,8 @@ using curve::common::Uncopyable; class RequestSender; /** - * 所有Chunk Server的request sender管理者, - * 可以理解为Chunk Server的链接管理者 + * The request sender manager for all Chunk Servers, + * which can be understood as the connection manager of the Chunk Servers */ class RequestSenderManager : public Uncopyable { public: @@ -47,26 +47,26 @@ class RequestSenderManager : public Uncopyable { RequestSenderManager() : rwlock_(), senderPool_() {} /** - * 获取指定leader id的sender,如果没有则根据leader -
* 地址,创建新的 sender并返回 - * @param leaderId:leader的id - * @param leaderAddr:leader的地址 - * @return nullptr:get或者create失败,否则成功 + * Obtain the sender with the specified leader ID, if not, based on the leader + * Address, create a new sender and return + * @param leaderId: The ID of the leader + * @param leaderAddr: The address of the leader + * @return nullptr: Get or create failed, otherwise successful */ SenderPtr GetOrCreateSender(const ChunkServerID& leaderId, const butil::EndPoint& leaderAddr, const IOSenderOption& senderopt); /** - * @brief 如果csId对应的RequestSender不健康,就进行重置 + * @brief If the RequestSender corresponding to csId is not healthy, reset it * @param csId chunkserver id */ void ResetSenderIfNotHealth(const ChunkServerID& csId); private: - // 读写锁,保护senderPool_ + // Read write lock to protect senderPool_ curve::common::BthreadRWLock rwlock_; - // 请求发送链接的map,以ChunkServer ID为key + // Request to send a map for the link, with ChunkServer ID as the key std::unordered_map senderPool_; }; diff --git a/src/client/service_helper.cpp b/src/client/service_helper.cpp index 70a7be6e34..68d2d55d02 100644 --- a/src/client/service_helper.cpp +++ b/src/client/service_helper.cpp @@ -171,10 +171,10 @@ class GetLeaderProxy : public std::enable_shared_from_this { success_(false) {} /** - * @brief 等待GetLeader返回结果 - * @param[out] leaderId leader的id - * @param[out] leaderAddr leader的ip地址 - * @return 0 成功 / -1 失败 + * @brief waiting for GetLeader to return results + * @param[out] leaderId The ID of the leader + * @param[out] leaderAddr The IP address of the leader + * @return 0 successful/-1 failed */ int Wait(ChunkServerID* leaderId, PeerAddr* leaderAddr) { { @@ -212,11 +212,11 @@ class GetLeaderProxy : public std::enable_shared_from_this { } /** - * @brief 发起GetLeader请求 - * @param peerAddresses 除当前leader以外的peer地址 - * @param logicPoolId getleader请求的logicpool id - * @param copysetId getleader请求的copyset id - * @param fileMetric metric统计 + * @brief initiates GetLeader request + * @param peerAddresses Peer addresses other than the current leader + * @param logicPoolId getleader requested logicpool ID + * @param copysetId getleader requested copyset id + * @param fileMetric metric statistics */ void StartGetLeader(const std::unordered_set& peerAddresses, const GetLeaderRpcOption& rpcOption, @@ -270,10 +270,10 @@ class GetLeaderProxy : public std::enable_shared_from_this { } /** - * @brief 处理异步请求结果 - * @param callId rpc请求id - * @param success rpc请求是否成功 - * @param peer rpc请求返回的leader信息 + * @brief processing asynchronous request results + * @param callId rpc request id + * @param success rpc request successful + * @param peer The leader information returned by the rpc request */ void HandleResponse(brpc::CallId callId, bool success, const curve::common::Peer& peer) { @@ -289,7 +289,7 @@ class GetLeaderProxy : public std::enable_shared_from_this { continue; } - // cancel以后,后续的rpc请求回调仍然会执行,但是会标记为失败 + // After canceling, subsequent rpc request callbacks will still be executed, but will be marked as failed brpc::StartCancel(id); } @@ -301,10 +301,10 @@ class GetLeaderProxy : public std::enable_shared_from_this { success_ = true; finishCv_.notify_one(); } else { - // 删除当前call id + // Delete the current call id callIds_.erase(callId); - // 如果为空,说明是最后一个rpc返回,需要标记请求失败,并向上返回 + // If it is empty, it indicates that it is the last rpc returned, and the request needs to be marked as failed and returned upwards if (callIds_.empty()) { std::lock_guard ulk(finishMtx_); finish_ = true; @@ -317,24 +317,24 @@ class GetLeaderProxy : 
public std::enable_shared_from_this { private: uint64_t proxyId_; - // 是否完成请求 - // 1. 其中一个请求成功 - // 2. 最后一个请求返回 - // 都会标记为true + //Whether to complete the request + // 1. One of the requests was successful + // 2. Last request returned + //Will be marked as true bool finish_; bthread::ConditionVariable finishCv_; bthread::Mutex finishMtx_; - // 记录cntl id + // Record cntl id std::set callIds_; - // 请求是否成功 + // Is the request successful bool success_; - // leader信息 + // leader Information curve::common::Peer leader_; - // 保护callIds_/success_,避免异步rpc回调同时操作 + // Protect callIds_/success_, Avoiding asynchronous rpc callbacks from operating simultaneously bthread::Mutex mtx_; LogicPoolID logicPooldId_; @@ -437,7 +437,7 @@ int ServiceHelper::CheckChunkServerHealth( return -1; } - // 访问 ip:port/health + // Accessing ip:port/health cntl.http_request().uri() = ipPort + "/health"; cntl.set_timeout_ms(requestTimeoutMs); httpChannel.CallMethod(nullptr, &cntl, nullptr, nullptr, nullptr); diff --git a/src/client/service_helper.h b/src/client/service_helper.h index 279c6a17f5..a06a191bf0 100644 --- a/src/client/service_helper.h +++ b/src/client/service_helper.h @@ -40,7 +40,7 @@ namespace curve { namespace client { -// GetLeader请求rpc参数信息 +// GetLeader request rpc parameter information struct GetLeaderRpcOption { uint32_t rpcTimeoutMs; @@ -48,7 +48,7 @@ struct GetLeaderRpcOption { : rpcTimeoutMs(rpcTimeoutMs) {} }; -// GetLeader请求对应的copyset信息及rpc相关参数信息 +// The copyset information and rpc related parameter information corresponding to the GetLeader request struct GetLeaderInfo { LogicPoolID logicPoolId; CopysetID copysetId; @@ -70,7 +70,7 @@ struct GetLeaderInfo { class GetLeaderProxy; -// GetLeader异步请求回调 +// GetLeader asynchronous request callback struct GetLeaderClosure : public google::protobuf::Closure { GetLeaderClosure(LogicPoolID logicPoolId, CopysetID copysetId, std::shared_ptr proxy) @@ -86,7 +86,7 @@ struct GetLeaderClosure : public google::protobuf::Closure { curve::chunkserver::GetLeaderResponse2 response; }; -// ServiceHelper是client端RPC服务的一些工具 +// ServiceHelper is a tool for client-side RPC services class ServiceHelper { public: /** @@ -103,38 +103,38 @@ class ServiceHelper { CloneSourceInfo* info); /** - * 从chunkserver端获取最新的leader信息 - * @param[in]: getLeaderInfo为对应copyset的信息 - * @param[out]: leaderAddr是出参,返回当前copyset的leader信息 - * @param[out]: leaderId是出参,返回当前leader的id信息 - * @param[in]: fileMetric是用于metric的记录 - * @return: 成功返回0,否则返回-1 + * Obtain the latest leader information from the chunkserver side + * @param[in]: getLeaderInfo is the information of the corresponding copyset + * @param[out]: leaderAddr is the output parameter that returns the leader information of the current copyset + * @param[out]: leaderId is the output parameter, returning the ID information of the current leader + * @param[in]: fileMetric is a record used for metric + * @return: Successfully returns 0, otherwise returns -1 */ static int GetLeader(const GetLeaderInfo& getLeaderInfo, PeerAddr *leaderAddr, ChunkServerID* leaderId = nullptr, FileMetric* fileMetric = nullptr); /** - * 从文件名中获取user信息. 
- * 用户的user信息需要夹在文件名中,比如文件名为temp,用户名为user, - * 那么其完整的文件信息是:temp_user_。 - * 如果文件名为: /temp_temp_,那么完整文件名为/temp_temp__user_。 - * @param[in]: filename为用户传下来的文件名 - * @param[out]:realfilename是真正文件名 - * @param[out]: user信息,出参 - * @return: 获取到user信息为true,否则false + * Obtain user information from the file name + * The user information needs to be included in the file name, such as the file name being temp and the username being user, + * So the complete file information is: temp_user_. + * If the file name is: /temp_temp_, So the complete file name is /temp_temp__user_. + * @param[in]: filename is the file name passed down by the user + * @param[out]: realfilename is the true file name + * @param[out]: user information, output parameters + * @return: Obtained user information as true, otherwise false */ static bool GetUserInfoFromFilename(const std::string& fname, std::string* realfilename, std::string* user); /** - * @brief: 发送http请求,判断chunkserver是否健康 + * @brief: Send an HTTP request to determine if the chunkserver is healthy * - * @param: endPoint chunkserver的ip:port - * @param: http请求的超时时间 + * @param: endPoint chunkserver's ip:port + * @param: HTTP request timeout * - * @return: 0 表示健康,-1表示不健康 + * @return: 0 indicates health, -1 indicates unhealthy */ static int CheckChunkServerHealth(const butil::EndPoint& endPoint, int32_t requestTimeoutMs); diff --git a/src/client/splitor.h b/src/client/splitor.h index eaffa27a62..4379d2b23b 100644 --- a/src/client/splitor.h +++ b/src/client/splitor.h @@ -46,16 +46,16 @@ class Splitor { static void Init(const IOSplitOption& ioSplitOpt); /** - * 用户IO拆分成Chunk级别的IO - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: FileEpoch_t file epoch info + * Split user IO into Chunk level IO + * @param: iotracker Big IO Context Information + * @param: metaCache is the cache information that needs to be used during the IO splitting process + * @param: targetlist The storage list of small IO after the large IO is split + * @param: data is the data to be written + * @param: offset The actual offset of IO issued by the user + * @param: length Data length + * @param: mdsclient searches for information through mdsclient when searching for metaahe fails + * @param: fi stores some basic information about the current IO, such as chunksize, etc + * @param: FileEpoch_t file epoch information */ static int IO2ChunkRequests(IOTracker* iotracker, MetaCache* metaCache, @@ -68,15 +68,15 @@ class Splitor { const FileEpoch_t* fEpoch); /** - * 对单ChunkIO进行细粒度拆分 - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: cid是当前chunk的ID信息 - * @param: data是待写的数据 - * @param: offset是当前chunk内的偏移 - * @param: length数据长度 - * @param: seq是当前chunk的版本号 + * Fine grained splitting of single ChunkIO + * @param: iotracker Big IO Context Information + * @param: metaCache is the cache information that needs to be used during the IO splitting process + * @param: targetlist The storage list of small IO after the large IO is split + * @param: cid is the ID information of the current chunk + * @param: data is the data to be written + * @param: offset is the offset within the current chunk + * @param: length Data length + * @param: seq is the version number of the current chunk */ static int 
SingleChunkIO2ChunkRequests(IOTracker* iotracker, MetaCache* metaCache, @@ -88,11 +88,11 @@ class Splitor { uint64_t seq); /** - * @brief 计算请求的location信息 - * @param ioTracker io上下文信息 - * @param metaCache 文件缓存信息 - * @param chunkIdx 当前chunk信息 - * @return source信息 + * @brief calculates the location information of the request + * @param ioTracker io Context Information + * @param metaCache file cache information + * @param chunkIdx Current chunk information + * @return source information */ static RequestSourceInfo CalcRequestSourceInfo(IOTracker* ioTracker, MetaCache* metaCache, @@ -105,16 +105,16 @@ class Splitor { private: /** - * IO2ChunkRequests内部会调用这个函数,进行真正的拆分操作 - * @param: iotracker大IO上下文信息 - * @param: mc是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: chunkidx是当前chunk在vdisk中的索引值 + * IO2ChunkRequests will internally call this function for actual splitting operations + * @param: iotracker Big IO Context Information + * @param: mc is the cache information that needs to be used during IO splitting process + * @param: targetlist The storage list of small IO after the large IO is split + * @param: Data is the data to be written + * @param: offset The actual offset of IO issued by the user + * @param: length Data length + * @param: mdsclient searches for information through mdsclient when searching for metaahe fails + * @param: fi stores some basic information about the current IO, such as chunksize, etc + * @param: chunkidx is the index value of the current chunk in the vdisk */ static bool AssignInternal(IOTracker* iotracker, MetaCache* metaCache, @@ -154,7 +154,7 @@ class Splitor { uint64_t len); private: - // IO拆分模块所使用的配置信息 + // Configuration information used for IO split modules static IOSplitOption iosplitopt_; }; } // namespace client diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp index 5cc99945fe..fd66c577bd 100644 --- a/src/client/unstable_helper.cpp +++ b/src/client/unstable_helper.cpp @@ -30,7 +30,7 @@ UnstableHelper::GetCurrentUnstableState(ChunkServerID csId, std::string ip = butil::ip2str(csEndPoint.ip).c_str(); mtx_.lock(); - // 如果当前ip已经超过阈值,则直接返回chunkserver unstable + // If the current IP has exceeded the threshold, it will directly return chunkserver unstable uint32_t unstabled = serverUnstabledChunkservers_[ip].size(); if (unstabled >= option_.serverUnstableThreshold) { serverUnstabledChunkservers_[ip].emplace(csId); diff --git a/src/client/unstable_helper.h b/src/client/unstable_helper.h index 89cc22be8e..a2c4aa155f 100644 --- a/src/client/unstable_helper.h +++ b/src/client/unstable_helper.h @@ -41,14 +41,14 @@ enum class UnstableState { ServerUnstable }; -// 如果chunkserver宕机或者网络不可达, 发往对应chunkserver的rpc会超时 -// 返回之后, 回去refresh leader然后再去发送请求 -// 这种情况下不同copyset上的请求,总会先rpc timedout然后重新refresh leader -// 为了避免一次多余的rpc timedout -// 记录一下发往同一个chunkserver上超时请求的次数 -// 如果超过一定的阈值,会发送http请求检查chunkserver是否健康 -// 如果不健康,则通知所有leader在这台chunkserver上的copyset -// 主动去refresh leader,而不是根据缓存的leader信息直接发送rpc +// If the chunkserver goes down or the network is unreachable, the rpc sent to the corresponding chunkserver will time out +// After returning, go back to the refresh leader and then send the request +// In this case, requests on different copysets will always first rpc timeout and then refresh the leader again +// To avoid a redundant rpc timeout +// Record the number of 
timeout requests sent to the same chunkserver +// If the threshold is exceeded, an HTTP request will be sent to check if the chunkserver is healthy +// If not healthy, notify all leaders of the copyset on this chunkserver +// Actively refresh the leader instead of directly sending rpc based on cached leader information class UnstableHelper { public: UnstableHelper() = default; @@ -78,10 +78,10 @@ class UnstableHelper { private: /** - * @brief 检查chunkserver状态 + * @brief Check chunkserver status * - * @param: endPoint chunkserver的ip:port地址 - * @return: true 健康 / false 不健康 + * @param: endPoint, ip:port address of endPoint chunkserver + * @return: true healthy/false unhealthy */ bool CheckChunkServerHealth(const butil::EndPoint& endPoint) const { return ServiceHelper::CheckChunkServerHealth( @@ -92,10 +92,10 @@ class UnstableHelper { bthread::Mutex mtx_; - // 同一chunkserver连续超时请求次数 + // Number of consecutive timeout requests for the same chunkserver std::unordered_map timeoutTimes_; - // 同一server上unstable chunkserver的id + // The ID of an unstable chunkserver on the same server std::unordered_map> serverUnstabledChunkservers_; }; diff --git a/src/common/authenticator.h b/src/common/authenticator.h index 7d9ba319c3..7646bf8b77 100644 --- a/src/common/authenticator.h +++ b/src/common/authenticator.h @@ -30,19 +30,19 @@ namespace common { class Authenticator { public: /** - * bref: 获取要进行签名的字符串 - * @param: date, 当前的时间 - * @param: owner, 文件所有者 - * @return: 返回需要进行加密的字符串 + * bref: Get the string to be signed + * @param: date, current time + * @param: owner, file owner + * @return: Returns the string that needs to be encrypted */ static std::string GetString2Signature(uint64_t date, const std::string& owner); /** - * bref: 为字符串计算签名 - * @param: String2Signature, 需要进行签名计算的字符串 - * @param: secretKey, 为计算的秘钥 - * @return: 返回需要进行签名过后的字符串 + * bref: Calculate signature for string + * @param: String2Signature, a string that requires signature calculation + * @param: secretKey, which is the calculated secret key + * @return: Returns the string that needs to be signed */ static std::string CalcString2Signature(const std::string& String2Signature, const std::string& secretKey); diff --git a/src/common/bitmap.cpp b/src/common/bitmap.cpp index dbff32702c..8017e0ec18 100644 --- a/src/common/bitmap.cpp +++ b/src/common/bitmap.cpp @@ -179,9 +179,9 @@ uint32_t Bitmap::NextSetBit(uint32_t index) const { uint32_t Bitmap::NextSetBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; - // bitmap中最后一个bit的index值 + // The index value of the last bit in the bitmap uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex + // The endIndex value cannot exceed lastIndex if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { @@ -206,7 +206,7 @@ uint32_t Bitmap::NextClearBit(uint32_t index) const { uint32_t Bitmap::NextClearBit(uint32_t startIndex, uint32_t endIndex) const { uint32_t index = startIndex; uint32_t lastIndex = bits_ - 1; - // endIndex值不能超过lastIndex + // The endIndex value cannot exceed lastIndex if (endIndex > lastIndex) endIndex = lastIndex; for (; index <= endIndex; ++index) { @@ -222,11 +222,11 @@ void Bitmap::Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const { - // endIndex的值不能小于startIndex + // The value of endIndex cannot be less than startIndex if (endIndex < startIndex) return; - // endIndex值不能超过lastIndex + // The endIndex value cannot exceed lastIndex uint32_t lastIndex = bits_ - 1; if (endIndex > lastIndex) 
endIndex = lastIndex; @@ -235,20 +235,20 @@ void Bitmap::Divide(uint32_t startIndex, BitRange setRange; vector tmpClearRanges; vector tmpSetRanges; - // 下一个位为0的index + // Next index with 0 bits uint32_t nextClearIndex; - // 下一个位为1的index + // Next index with bit 1 uint32_t nextSetIndex; - // 划分所有range + // Divide all ranges while (startIndex != NO_POS) { nextClearIndex = NextClearBit(startIndex, endIndex); - // 1.存放当前clear index之前的 set range - // nextClearIndex如果等于startIndex说明前面没有 set range + // 1. Store the set range before the current clear index + // If nextClearIndex is equal to startIndex, it indicates that there is no set range before it if (nextClearIndex != startIndex) { setRange.beginIndex = startIndex; - // nextClearIndex等于NO_POS说明已经找到末尾 - // 最后一块连续区域是 set range + // nextClearIndex equals NO_POS description has found the end + // The last continuous area is set range setRange.endIndex = nextClearIndex == NO_POS ? endIndex : nextClearIndex - 1; @@ -258,8 +258,8 @@ void Bitmap::Divide(uint32_t startIndex, break; nextSetIndex = NextSetBit(nextClearIndex, endIndex); - // 2.存放当前set index之前的 clear range - // 能到这一步说明前面肯定存在clear range,所以不用像第1步一样做判断 + // 2. Store the clear range before the current set index + // Being able to reach this step indicates that there must be a clear range ahead, so there is no need to make a judgment like in step 1 clearRange.beginIndex = nextClearIndex; clearRange.endIndex = nextSetIndex == NO_POS ? endIndex @@ -268,7 +268,7 @@ void Bitmap::Divide(uint32_t startIndex, startIndex = nextSetIndex; } - // 根据参数中的clearRanges和setRanges指针是否为空返回结果 + // Returns a result based on whether the clearRanges and setRanges pointers in the parameters are empty if (clearRanges != nullptr) { *clearRanges = std::move(tmpClearRanges); } diff --git a/src/common/bitmap.h b/src/common/bitmap.h index e7a0e1270d..f7d55c2ab0 100644 --- a/src/common/bitmap.h +++ b/src/common/bitmap.h @@ -36,12 +36,12 @@ const int BITMAP_UNIT_SIZE = 8; const int ALIGN_FACTOR = 3; // 2 ^ ALIGN_FACTOR = BITMAP_UNIT_SIZE /** - * 表示bitmap中的一段连续区域,为闭区间 + * Represents a continuous region in a bitmap, which is a closed interval */ struct BitRange { - // 连续区域起始位置在bitmap中的索引 + // Index of the starting position of a continuous region in Bitmap uint32_t beginIndex; - // 连续区域结束位置在bitmap中的索引 + // Index of the end position of a continuous region in Bitmap uint32_t endIndex; }; @@ -51,15 +51,15 @@ std::string BitRangeVecToString(const std::vector &ranges); class Bitmap { public: /** - * 新建bitmap时的构造函数 - * @param bits: 要构造的bitmap的位数 + * Constructor when creating a new bitmap + * @param bits: The number of bits to construct the bitmap */ explicit Bitmap(uint32_t bits); /** - * 从已有的快照文件初始化时的构造函数 - * 构造函数内部会再new一个新的bitmap,然后从参数中的bitmap memcpy过去 - * @param bits: bitmap的位数 - * @param bitmap: 外部提供的用于初始化的bitmap + * Constructor when initializing from an existing snapshot file + *The constructor will create a new bitmap internally, and then use the bitmap memcpy in the parameters + * @param bits: Bitmap bits + * @param bitmap: An externally provided bitmap for initialization */ explicit Bitmap(uint32_t bits, const char* bitmap); @@ -70,14 +70,14 @@ class Bitmap { ~Bitmap(); /** - * 拷贝构造,使用深拷贝 - * @param bitmap:从该对象拷贝内容 + * Copy construction, using deep copy + * @param bitmap: Copy content from this object */ Bitmap(const Bitmap& bitmap); /** - * 赋值函数,使用深拷贝 - * @param bitmap:从该对象拷贝内容 - * @reutrn:返回拷贝后对象引用 + *Assignment function, using deep copy + * @param bitmap: Copy content from this object + * @reutrn: Returns the copied object 
reference */ Bitmap& operator = (const Bitmap& bitmap); @@ -85,114 +85,114 @@ class Bitmap { Bitmap& operator=(Bitmap&& other) noexcept; /** - * 比较两个bitmap是否相同 - * @param bitmap:待比较的bitmap - * @return:如果相同返回true,如果不同返回false + * Compare whether two bitmaps are the same + * @param bitmap: Bitmap to be compared + * @return: Returns true if the same, false if different */ bool operator == (const Bitmap& bitmap) const; /** - * 比较两个bitmap是否不同 - * @param bitmap:待比较的bitmap - * @return:如果不同返回true,如果相同返回false + * Compare whether two bitmaps are different + * @param bitmap: Bitmap to be compared + * @return: Returns true if different, false if the same */ bool operator != (const Bitmap& bitmap) const; /** - * 将所有位置1 + * Place all positions 1 */ void Set(); /** - * 将指定位置1 - * @param index: 指定位的位置 + * Specify position 1 + * @param index: Refers to the location of the positioning */ void Set(uint32_t index); /** - * 将指定范围的位置为1 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 + * Set the position of the specified range to 1 + * @param startIndex: The starting position of the range, including this position + * @param endIndex: The end position of the range, including this position */ void Set(uint32_t startIndex, uint32_t endIndex); /** - * 将所有位置0 + * Move all positions to 0 */ void Clear(); /** - * 将指定位置0 - * @param index: 指定位的位置 + * Will specify position 0 + * @param index: Refers to the location of the positioning */ void Clear(uint32_t index); /** - * 将指定范围的位置为0 - * @param startIndex: 范围起始位置,包括此位置 - * @param endIndex: 范围结束位置,包括此位置 + * Set the position of the specified range to 0 + * @param startIndex: The starting position of the range, including this position + * @param endIndex: The end position of the range, including this position */ void Clear(uint32_t startIndex, uint32_t endIndex); /** - * 获取指定位置位的状态 - * @param index: 指定位的位置 - * @return: true表示当前位状态为1,false表示为0 + * Obtain the status of the specified position bit + * @param index: Refers to the location of the positioning + * @return: true indicates that the current bit status is 1, while false indicates that it is 0 */ bool Test(uint32_t index) const; /** - * 获取指定位置及之后的首个位为1的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为1的位置,如果不存在返回NO_POS + * Obtain the specified position and the position after which the first bit is 1 + * @param index: Refers to the location of the positioning, including this location + * @return: The position where the first bit is 1. If it does not exist, return NO_POS */ uint32_t NextSetBit(uint32_t index) const; /** - * 获取指定起始位置到结束位置之间的的首个位为1的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为1的位置,如果指定范围内不存在则返回NO_POS + * Gets the position where the first bit between the specified start position and end position is 1 + * @param startIndex: The starting position, including this position + * @param endIndex: End position, including this position + * @return: The position where the first bit is 1. If it does not exist within the specified range, return NO_POS */ uint32_t NextSetBit(uint32_t startIndex, uint32_t endIndex) const; /** - * 获取指定位置及之后的首个位为0的位置 - * @param index: 指定位的位置,包含此位置 - * @return: 首个位为0的位置,如果不存在返回NO_POS + * Obtain the specified position and the position after which the first bit is 0 + * @param index: Refers to the location of the positioning, including this location + * @return: The position where the first bit is 0. 
If it does not exist, return NO_POS */ uint32_t NextClearBit(uint32_t index) const; /** - * 获取指定起始位置到结束位置之间的的首个位为0的位置 - * @param startIndex: 起始位置,包含此位置 - * @param endIndex: 结束位置,包含此位置 - * @return: 首个位为0的位置,如果指定范围内不存在则返回NO_POS + * Gets the position where the first bit between the specified start position and end position is 0 + * @param startIndex: The starting position, including this position + * @param endIndex: End position, including this position + * @return: The position where the first bit is 0. If it does not exist within the specified range, return NO_POS */ uint32_t NextClearBit(uint32_t startIndex, uint32_t endIndex) const; /** - * 将bitmap的指定区域分割成若干连续区域,划分依据为位状态,连续区域内的位状态一致 - * 例如:00011100会被划分为三个区域,[0,2]、[3,5]、[6,7] - * @param startIndex: 指定区域的起始索引 - * @param endIndex: 指定范围的结束索引 - * @param clearRanges: 存放位状态为0的连续区域的向量,可以指定为nullptr - * @param setRanges: 存放位状态为1的连续区域的向量,可以指定为nullptr + * Divide the designated area of the bitmap into several continuous areas based on bit states, with consistent bit states within the continuous areas + * For example, 00011100 will be divided into three regions: [0,2], [3,5], [6,7] + * @param startIndex: The starting index of the specified region + * @param endIndex: The end index of the specified range + * @param clearRanges: A vector that stores a continuous region with a bit state of 0, which can be specified as nullptr + * @param setRanges: A vector that stores a continuous region with a bit state of 1, which can be specified as nullptr */ void Divide(uint32_t startIndex, uint32_t endIndex, vector* clearRanges, vector* setRanges) const; /** - * bitmap的有效位数 - * @return: 返回位数 + * Bitmap's significant digits + * @return: Returns the number of digits */ uint32_t Size() const; /** - * 获取bitmap的内存指针,用于持久化bitmap - * @return: bitmap的内存指针 + * Obtain a memory pointer to Bitmap for persisting Bitmap + * @return: Memory pointer to bitmap */ const char* GetBitmap() const; private: - // bitmap的字节数 + // Bytes of bitmap int unitCount() const { - // 同 (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE + // Same as (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE return (bits_ + BITMAP_UNIT_SIZE - 1) >> ALIGN_FACTOR; } - // 指定位置的bit在其所在字节中的偏移 + // The offset of the bit at the specified position in its byte int indexOfUnit(uint32_t index) const { - // 同 index / BITMAP_UNIT_SIZE + // Same as index / BITMAP_UNIT_SIZE return index >> ALIGN_FACTOR; } - // 逻辑计算掩码值 + // Logical calculation mask value char mask(uint32_t index) const { int indexInUnit = index % BITMAP_UNIT_SIZE; char mask = 0x01 << indexInUnit; @@ -200,7 +200,7 @@ class Bitmap { } public: - // 表示不存在的位置,值为0xffffffff + // Represents a non-existent position, with a value of 0xffffffff static const uint32_t NO_POS; private: diff --git a/src/common/channel_pool.h b/src/common/channel_pool.h index 458baa33d3..bad9efe77d 100644 --- a/src/common/channel_pool.h +++ b/src/common/channel_pool.h @@ -39,18 +39,18 @@ namespace common { class ChannelPool { public: /** - * @brief 从channelMap获取或创建并Init到指定地址的channel + * @brief Obtain or create a channel from channelMap and Init it to the specified address * - * @param addr 对端的地址 - * @param[out] channelPtr 到指定地址的channel + * @param addr The address of the opposite end + * @param[out] channelPtr to the specified channel address * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ int GetOrInitChannel(const std::string& addr, ChannelPtr* channelPtr); /** - * @brief 清空map + * @brief Clear map */ void Clear(); diff --git 
a/src/common/concurrent/bounded_blocking_queue.h b/src/common/concurrent/bounded_blocking_queue.h index 56c59fcfc1..bed0b4dede 100644 --- a/src/common/concurrent/bounded_blocking_queue.h +++ b/src/common/concurrent/bounded_blocking_queue.h @@ -70,7 +70,7 @@ class BBQItem { }; /** - * 有 capacity 限制的阻塞队列,线程安全 + * Blocking queues with capacity restrictions, thread safe */ template class BoundedBlockingDeque : public Uncopyable { diff --git a/src/common/concurrent/concurrent.h b/src/common/concurrent/concurrent.h index df79ea8ec8..236799f2c9 100644 --- a/src/common/concurrent/concurrent.h +++ b/src/common/concurrent/concurrent.h @@ -38,7 +38,7 @@ namespace curve { namespace common { -// curve公共组件命名空间替换 +// curve public component namespace replacement template using Atomic = std::atomic; using Mutex = std::mutex; @@ -47,13 +47,13 @@ using LockGuard = std::lock_guard; using UniqueLock = std::unique_lock; using ConditionVariable = std::condition_variable; -// curve内部定义的锁组件 +// Lock components defined internally in curve using RWLock = RWLock; using SpinLock = SpinLock; using ReadLockGuard = ReadLockGuard; using WriteLockGuard = WriteLockGuard; -// curve内部定义的线程组件 +// Thread components defined internally in curve using TaskQueue = TaskQueue; using ThreadPool = ThreadPool; diff --git a/src/common/concurrent/count_down_event.h b/src/common/concurrent/count_down_event.h index bfce259351..564750ac62 100644 --- a/src/common/concurrent/count_down_event.h +++ b/src/common/concurrent/count_down_event.h @@ -31,10 +31,10 @@ namespace curve { namespace common { /** - * 用于线程间同步,CountDownEvent是通过一个计数器来实现的,计数器的 - * 初始值initCnt为需要等待event的总数,通过接口Wait等待。每当一个 - * event发生,就会调用Signal接口,让计数器的值就会减 1。当计数器值到 - * 达0时,则Wait等待就会结束。一般用于等待一些事件发生 + * Used for inter-thread synchronization, CountDownEvent is implemented using a counter + * with an initial value (initCnt) representing the total number of events to wait for. + * Threads can wait for events using the Wait interface. Each time an event occurs, the Signal interface is called, + * decrementing the counter by 1. When the counter reaches 0, the waiting in Wait will conclude. It is typically used to wait for certain events to occur. 
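A minimal usage sketch of this synchronization pattern, assuming the CountDownEvent interface declared in this header; the worker count and the lambda body are placeholders:

    #include <thread>
    #include <vector>
    #include "src/common/concurrent/count_down_event.h"

    void WaitForAllWorkers() {
        const int kWorkers = 4;
        curve::common::CountDownEvent done(kWorkers);  // counter starts at the number of events to wait for
        std::vector<std::thread> workers;
        for (int i = 0; i < kWorkers; ++i) {
            workers.emplace_back([&done] {
                // ... one unit of work ...
                done.Signal();  // each finished event decrements the counter
            });
        }
        done.Wait();  // returns once the counter reaches 0
        for (auto& t : workers) t.join();
    }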
*/ class CountDownEvent { public: @@ -51,8 +51,8 @@ class CountDownEvent { } /** - * 重新设置event计数 - * @param eventCount:事件计数 + * Reset event count + * @param eventCount: Event Count */ void Reset(int eventCount) { std::unique_lock guard(mutex_); @@ -60,7 +60,7 @@ class CountDownEvent { } /** - * 通知wait event发生了一次,计数减1 + * Notify that a wait event has occurred once, count minus 1 */ void Signal() { std::unique_lock guard(mutex_); @@ -71,7 +71,7 @@ class CountDownEvent { } /** - * 等待initCnt的event发生之后,再唤醒 + * Wait for the event of initCnt to occur before waking up */ void Wait() { std::unique_lock guard(mutex_); @@ -81,9 +81,9 @@ class CountDownEvent { } /** - * 等待initCnt的event发生,或者指定时长 - * @param waitMs: 等待的ms数 - * @return:如果所有等待的event都发生,那么就返回true,否则false + * Wait for the event of initCnt to occur, or specify a duration + * @param waitMs: Number of ms waiting + * @return: If all waiting events occur, then return true; otherwise, false */ bool WaitFor(int waitMs) { std::unique_lock guard(mutex_); @@ -92,7 +92,7 @@ class CountDownEvent { while (count_ > 0) { auto now = std::chrono::high_resolution_clock::now(); std::chrono::duration elapsed = now - start; - // 计算还剩余多少时间 + // Calculate how much time is left int leftMs = waitMs - static_cast(elapsed.count()); if (leftMs > 0) { auto ret = cond_.wait_for(guard, @@ -113,7 +113,7 @@ class CountDownEvent { private: mutable std::mutex mutex_; std::condition_variable cond_; - // 需要等待的事件计数 + // Count of events to wait for int count_; }; diff --git a/src/common/concurrent/task_thread_pool.h b/src/common/concurrent/task_thread_pool.h index b9b23eebe3..023e604f68 100644 --- a/src/common/concurrent/task_thread_pool.h +++ b/src/common/concurrent/task_thread_pool.h @@ -43,7 +43,7 @@ namespace common { using Task = std::function; -// 异步运行回调的线程池 +// Thread pool for asynchronously running callbacks template class TaskThreadPool : public Uncopyable { @@ -58,9 +58,9 @@ class TaskThreadPool : public Uncopyable { } /** - * 启动一个线程池 - * @param numThreads 线程池的线程数量,必须大于 0,不设置就是 INT_MAX (不推荐) - * @param queueCapacity queue 的容量,必须大于 0 + * Start a thread pool + * @param numThreads The number of threads in the thread pool must be greater than 0, otherwise it is INT_ MAX (not recommended) + * @param queueCapacity The capacity of queue must be greater than 0 * @return */ int Start(int numThreads, int queueCapacity = INT_MAX) { @@ -86,7 +86,7 @@ class TaskThreadPool : public Uncopyable { } /** - * 关闭线程池 + * Close Thread Pool */ void Stop() { if (running_.exchange(false, std::memory_order_acq_rel)) { @@ -101,10 +101,10 @@ class TaskThreadPool : public Uncopyable { } /** - * push 一个 task 给线程池处理,如果队列满,线程阻塞,直到 task push 进去 - * 需要注意的是用户自己需要保证 task 的有效的。除此之外,此 TaskThreadPool - * 并没有提供获取 f 的返回值,所以如果需要获取运行 f 的一些额外信息,需要用户 - * 自己在 f 内部逻辑添加 + * Push a task to the thread pool for processing. If the queue is full, the thread will block until the task is pushed in + * It should be noted that users themselves need to ensure the effectiveness of the task. 
In addition, this TaskThreadPool + * There is no provision for obtaining the return value of f, so if you need to obtain some additional information about running f, you need the user to + * Add your own internal logic to f * @tparam F * @tparam Args * @param f @@ -121,40 +121,40 @@ class TaskThreadPool : public Uncopyable { notEmpty_.notify_one(); } - /* 返回线程池 queue 的容量 */ + /*Returns the capacity of the thread pool queue*/ int QueueCapacity() const { return capacity_; } - /* 返回线程池当前 queue 中的 task 数量,线程安全 */ + /*Returns the number of tasks in the current queue of the thread pool, thread safe*/ int QueueSize() const { std::lock_guard guard(mutex_); return queue_.size(); } - /* 返回线程池的线程数 */ + /*Returns the number of threads in the thread pool*/ int ThreadOfNums() const { return threads_.size(); } protected: - /*线程工作时执行的函数*/ + /*Functions executed during thread work*/ virtual void ThreadFunc() { while (running_.load(std::memory_order_acquire)) { Task task(Take()); - /* ThreadPool 退出的时候,queue 为空,那么会返回无效的 task */ + /*When ThreadPool exits, if the queue is empty, an invalid task will be returned*/ if (task) { task(); } } } - /* 判断线程池 queue 是否已经满了, 非线程安全,私有内部使用 */ + /*Determine if the thread pool queue is full, non thread safe, private internal use*/ bool IsFullUnlock() const { return queue_.size() >= static_cast(capacity_); } - /* 从线程池的 queue 中取一个 task 线程安全 */ + /*Taking a task from the queue in the thread pool is thread safe*/ Task Take() { std::unique_lock guard(mutex_); while (queue_.empty() && running_.load(std::memory_order_acquire)) { diff --git a/src/common/configuration.cpp b/src/common/configuration.cpp index 4496045be6..2ae119ba1e 100644 --- a/src/common/configuration.cpp +++ b/src/common/configuration.cpp @@ -61,8 +61,8 @@ bool Configuration::LoadConfig() { } bool Configuration::SaveConfig() { - // 当前先只保存配置,原文件的注释等内容先忽略 - // TODO(yyk): 后续考虑改成原文件格式不变,只修改配置值 + // Currently, only the configuration is saved, and the comments and other contents of the original file are ignored + // TODO(yyk): In the future, consider changing to the original file format without changing, only modifying the configuration values std::ofstream wStream(confFile_); if (wStream.is_open()) { for (auto& pair : config_) { @@ -104,13 +104,13 @@ void Configuration::UpdateMetricIfExposed(const std::string &key, } auto it = configMetric_.find(key); - // 如果配置项不存在,则新建配置项 + // If the configuration item does not exist, create a new configuration item if (it == configMetric_.end()) { ConfigItemPtr configItem = std::make_shared(); configItem->ExposeAs(exposeName_, key); configMetric_[key] = configItem; } - // 更新配置项 + // Update Configuration Items configMetric_[key]->Set("conf_name", key); configMetric_[key]->Set("conf_value", value); configMetric_[key]->Update(); diff --git a/src/common/configuration.h b/src/common/configuration.h index d546995ade..f35471be7b 100644 --- a/src/common/configuration.h +++ b/src/common/configuration.h @@ -45,9 +45,9 @@ class Configuration { void PrintConfig(); std::map ListConfig() const; /** - * 暴露config的metric供采集 - * 如果metric已经暴露,则直接返回 - * @param exposeName: 对外暴露的metric的名字 + * Expose the metric of config for collection + * If the metric has already been exposed, return it directly + * @param exposeName: The name of the exposed metric */ void ExposeMetric(const std::string& exposeName); @@ -56,24 +56,24 @@ class Configuration { std::string GetStringValue(const std::string &key); /* - * @brief GetStringValue 获取指定配置项的值 + * @brief GetStringValue Get the value of the specified configuration item * 
- * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetStringValue(const std::string &key, std::string *out); void SetStringValue(const std::string &key, const std::string &value); int GetIntValue(const std::string &key, uint64_t defaultvalue = 0); /* - * @brief GetIntValue/GetUInt32Value/GetUInt64Value 获取指定配置项的值 //NOLINT + * @brief GetIntValue/GetUInt32Value/GetUInt64Value Get the value of the specified configuration item//NOLINT * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetIntValue(const std::string &key, int *out); bool GetUInt32Value(const std::string &key, uint32_t *out); @@ -87,36 +87,36 @@ class Configuration { double GetDoubleValue(const std::string &key, double defaultvalue = 0.0); /* - * @brief GetDoubleValue 获取指定配置项的值 + * @brief GetDoubleValue Get the value of the specified configuration item * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetDoubleValue(const std::string &key, double *out); void SetDoubleValue(const std::string &key, const double value); double GetFloatValue(const std::string &key, float defaultvalue = 0.0); /* - * @brief GetFloatValue 获取指定配置项的值 + * @brief GetFloatValue Get the value of the specified configuration item * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetFloatValue(const std::string &key, float *out); void SetFloatValue(const std::string &key, const float value); bool GetBoolValue(const std::string &key, bool defaultvalue = false); /* - * @brief GetBoolValue 获取指定配置项的值 + * @brief GetBoolValue Get the value of the specified configuration item * - * @param[in] key 配置项名称 - * @param[out] out 获取的值 + * @param[in] key configuration item name + * @param[out] out The value obtained * - * @return false-未获取到 true-获取成功 + * @return false-did not obtain, true-obtained successfully */ bool GetBoolValue(const std::string &key, bool *out); void SetBoolValue(const std::string &key, const bool value); @@ -126,12 +126,12 @@ class Configuration { void SetValue(const std::string &key, const std::string &value); /* - * @brief GetValueFatalIfFail 获取指定配置项的值,失败打FATAL日志 + * @brief GetValueFatalIfFail to obtain the value of the specified configuration item, failed to log FATAL * - * @param[in] key 配置项名称 - * @param[out] value 获取的值 + * @param[in] key configuration item name + * @param[out] value The value obtained * - * @return 无 + * @return None */ void GetValueFatalIfFail(const std::string& key, int* value); void GetValueFatalIfFail(const std::string& key, std::string* value); @@ -171,8 +171,8 @@ class Configuration { private: /** - * 更新新的配置到metric - * @param 要更新的metric + *Update new configuration to metric + * @param The metric to update */ void UpdateMetricIfExposed(const std::string &key, const std::string &value); @@ -180,9 +180,9 @@ class Configuration { private: std::string confFile_; std::map config_; - // metric对外暴露的名字 + 
//Metric's exposed name std::string exposeName_; - // 每一个配置项使用单独的一个metric,用map管理 + //Each configuration item uses a separate metric and is managed using a map ConfigMetricMap configMetric_; }; diff --git a/src/common/crc32.h b/src/common/crc32.h index 99916fe873..58e5846391 100644 --- a/src/common/crc32.h +++ b/src/common/crc32.h @@ -32,23 +32,23 @@ namespace curve { namespace common { /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on the crc32 library of brpc + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ inline uint32_t CRC32(const char *pData, size_t iLen) { return butil::crc32c::Value(pData, iLen); } /** - * 计算数据的CRC32校验码(CRC32C),基于brpc的crc32库进行封装. 此函数支持继承式 - * 计算,以支持对SGL类型的数据计算单个CRC校验码。满足如下约束: + * Calculate the CRC32 checksum (CRC32C) of the data and encapsulate it based on the crc32 library of brpc This function supports inheritance + * Calculate to support the calculation of a single CRC checksum for SGL type data. Meet the following constraints: * CRC32("hello world", 11) == CRC32(CRC32("hello ", 6), "world", 5) - * @param crc 起始的crc校验码 - * @param pData 待计算的数据 - * @param iLen 待计算的数据长度 - * @return 32位的数据CRC32校验码 + * @param crc starting crc checksum + * @param pData The data to be calculated + * @param iLen The length of data to be calculated + * @return 32-bit data CRC32 checksum */ inline uint32_t CRC32(uint32_t crc, const char *pData, size_t iLen) { return butil::crc32c::Extend(crc, pData, iLen); diff --git a/src/common/curve_define.h b/src/common/curve_define.h index 04d07ad5ec..9d5b62533b 100644 --- a/src/common/curve_define.h +++ b/src/common/curve_define.h @@ -34,7 +34,7 @@ namespace curve { namespace common { -// curve系统中共用的定义,对于各模块自己独有的放在各模块自己的define中 +// The definition shared in the curve system is unique to each module and placed in its own definition using ChunkID = uint64_t; using CopysetID = uint32_t; using ChunkIndex = uint32_t; @@ -55,7 +55,7 @@ const uint32_t kKB = 1024; const uint32_t kMB = 1024*kKB; const uint32_t kGB = 1024*kMB; -// maigic number用于FilePool_meta file计算crc +// maigic number for FilePool_meta file calculation of crc const char kFilePoolMagic[3] = "01"; constexpr uint32_t kDefaultBlockSize = 4096; diff --git a/src/common/define.h b/src/common/define.h index e3f90d7bd0..0c1cb2705d 100644 --- a/src/common/define.h +++ b/src/common/define.h @@ -29,7 +29,7 @@ namespace curve { namespace snapshotcloneserver { -// snapshotcloneservice字符串常量定义 +// snapshotcloneservice string constant definition extern const char* kServiceName; // action extern const char* kCreateSnapshotAction; @@ -76,54 +76,54 @@ enum class CloneTaskType { kRecover }; -// 未初始序列号 +// Uninitialized serial number const uint64_t kUnInitializeSeqNum = 0; -// 初始序列号 +// Initial serial number const uint64_t kInitializeSeqNum = 1; -// 错误码:执行成功 +// Error code: Execution successful const int kErrCodeSuccess = 0; -// 错误码: 内部错误 +// Error code: Internal error const int kErrCodeInternalError = -1; -// 错误码:服务器初始化失败 +// Error code: Server initialization failed const int kErrCodeServerInitFail = -2; -// 错误码:服务器启动失败 +// Error code: Server startup failed const int kErrCodeServerStartFail = -3; -// 错误码:服务已停止 +// Error code: Service stopped const int kErrCodeServiceIsStop = -4; -// 错误码:非法请求 +// Error code: Illegal request const int kErrCodeInvalidRequest = -5; -// 
错误码:任务已存在 +// Error code: Task already exists const int kErrCodeTaskExist = -6; -// 错误码:非法的用户 +// Error code: Illegal user const int kErrCodeInvalidUser = -7; -// 错误码:文件不存在 +// Error code: File does not exist const int kErrCodeFileNotExist = -8; -// 错误码:文件状态异常 +// Error code: File status abnormal const int kErrCodeFileStatusInvalid = -9; -// 错误码:chunk大小未按chunk分片大小对齐 +// Error code: Chunk size not aligned with chunk partition size const int kErrCodeChunkSizeNotAligned = -10; -// 错误码:文件名不匹配 +// Error code: File name mismatch const int kErrCodeFileNameNotMatch = -11; -// 错误码: 不能删除未完成的快照 +// Error code: Unable to delete incomplete snapshot const int kErrCodeSnapshotCannotDeleteUnfinished = -12; -// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复 +// Error code: Cannot take a snapshot of files with abnormal snapshots, or cannot clone/recover target files with errors const int kErrCodeSnapshotCannotCreateWhenError = -13; -// 错误码:取消的快照已完成 +// Error code: Canceled snapshot completed const int kErrCodeCannotCancelFinished = -14; -// 错误码:不能从未完成或存在错误的快照克隆 +// Error code: Cannot clone a snapshot that has never been completed or has errors const int kErrCodeInvalidSnapshot = -15; -// 错误码:不能删除正在克隆的快照 +// Error code: Unable to delete snapshot being cloned const int kErrCodeSnapshotCannotDeleteCloning = -16; -// 错误码:不能清理未完成的克隆 +// Error code: Unable to clean up incomplete clones const int kErrCodeCannotCleanCloneUnfinished = -17; -// 错误码:快照到达上限 +// Error code: The snapshot has reached the upper limit const int kErrCodeSnapshotCountReachLimit = -18; -// 错误码:文件已存在 +// Error code: File already exists const int kErrCodeFileExist = -19; -// 错误码:克隆任务已满 +// Error code: Clone task is full const int kErrCodeTaskIsFull = -20; -// 错误码:不支持 +// Error code: not supported const int kErrCodeNotSupport = -21; extern std::map code2Msg; diff --git a/src/common/fs_util.h b/src/common/fs_util.h index 3e591fd5ca..57e1708916 100644 --- a/src/common/fs_util.h +++ b/src/common/fs_util.h @@ -31,7 +31,7 @@ namespace curve { namespace common { -// 计算path2相对于path1的相对路径 +// Calculate the relative path of path2 relative to path1 inline std::string CalcRelativePath(const std::string &path1, const std::string &path2) { if (path1.empty() || path2.empty()) { diff --git a/src/common/interruptible_sleeper.h b/src/common/interruptible_sleeper.h index 73c2cba645..241d464308 100644 --- a/src/common/interruptible_sleeper.h +++ b/src/common/interruptible_sleeper.h @@ -29,18 +29,18 @@ namespace curve { namespace common { /** - * InterruptibleSleeper 实现可 interruptible 的 sleep 功能. - * 正常情况下 wait_for 超时, 接收到退出信号之后, 程序会立即被唤醒, - * 退出 while 循环, 并执行 cleanup 代码. + * Implement interruptible sleep functionality with InterruptibleSleeper. + * Under normal circumstances, when wait_for times out and receives an exit signal, + * the program will be immediately awakened, exit the while loop, and execute cleanup code. 
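A short sketch of the periodic loop this describes, assuming the InterruptibleSleeper interface declared in this header; the one-second period and the task body are placeholders:

    #include <chrono>
    #include "src/common/interruptible_sleeper.h"

    curve::common::InterruptibleSleeper sleeper;

    void PeriodicWorker() {
        // wait_for() returns true when the timeout elapses and false once
        // interrupt() has been called, so the loop exits promptly on shutdown.
        while (sleeper.wait_for(std::chrono::seconds(1))) {
            // ... one round of periodic work ...
        }
        // ... cleanup code runs here after the exit signal ...
    }

    void Shutdown() {
        sleeper.interrupt();  // wakes the sleeping worker immediately
    }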
*/ class InterruptibleSleeper { public: /** - * @brief wait_for 等待指定时间,如果接受到退出信号立刻返回 + * @brief wait_for Wait for the specified time, and immediately return if an exit signal is received * - * @param[in] time 指定wait时长 + * @param[in] time specifies the wait duration * - * @return false-收到退出信号 true-超时后退出 + * @return false - Received exit signal true - Exit after timeout */ template bool wait_for(std::chrono::duration const& time) { @@ -49,7 +49,7 @@ class InterruptibleSleeper { } /** - * @brief interrupt 给当前wait发送退出信号 + * @brief interrupt Send an exit signal to the current wait */ void interrupt() { UniqueLock lock(m); diff --git a/src/common/location_operator.cpp b/src/common/location_operator.cpp index f9d5a8f4c8..8fe3a97974 100644 --- a/src/common/location_operator.cpp +++ b/src/common/location_operator.cpp @@ -44,8 +44,8 @@ std::string LocationOperator::GenerateCurveLocation( OriginType LocationOperator::ParseLocation( const std::string& location, std::string* originPath) { - // 找到最后一个“@”,不能简单用SplitString - // 因为不能保证OriginPath中不包含“@” + // Found the last '@', cannot simply use SplitString + // Because it cannot be guaranteed that OriginPath does not contain '@' std::string::size_type pos = location.find_last_of(kOriginTypeSeprator); if (std::string::npos == pos) { diff --git a/src/common/location_operator.h b/src/common/location_operator.h index a86b33d158..cc3de61a22 100644 --- a/src/common/location_operator.h +++ b/src/common/location_operator.h @@ -43,39 +43,39 @@ enum class OriginType { class LocationOperator { public: /** - * 生成s3的location - * location格式:${objectname}@s3 - * @param objectName:s3上object的名称 - * @return:生成的location + * Generate location for s3 + * location format: ${objectname}@s3 + * @param objectName: The name of the object on s3 + * @return: Generated location */ static std::string GenerateS3Location(const std::string& objectName); /** - * 生成curve的location - * location格式:${filename}:${offset}@cs + * Generate the location of the curve + * location format: ${filename}:${offset}@cs */ static std::string GenerateCurveLocation(const std::string& fileName, off_t offset); /** - * 解析数据源的位置信息 - * location格式: - * s3示例:${objectname}@s3 - * curve示例:${filename}:${offset}@cs + * Parsing the location information of data sources + * location format: + * example of s3: ${objectname}@s3 + * curve example: ${filename}:${offset}@cs * - * @param location[in]:数据源的位置,其格式为originPath@originType - * @param originPath[out]:表示数据源在源端的路径 - * @return:返回OriginType,表示源数据的源端类型是s3还是curve - * 如果路径格式不正确或者originType无法识别,则返回InvalidOrigin + * @param location[in]: The location of the data source, in the format originPath@originType + * @param originPath[out]: represents the path of the data source on the source side + * @return: Returns OriginType, indicating whether the source side type of the source data is s3 or curve + * If the path format is incorrect or the originType is not recognized, InvalidOrigin is returned */ static OriginType ParseLocation(const std::string& location, std::string* originPath); /** - * 解析curvefs的originPath - * 格式:${filename}:${offset} - * @param originPath[in]:数据源在curvefs上的路径 - * @param fileName[out]:数据源所属文件名 - * @param offset[out]:数据源在文件中的偏移 - * @return: 解析成功返回true,失败返回false + * Parsing the originPath of curves + * Format: ${filename}:${offset} + * @param originPath[in]: The path of the data source on curves + * @param fileName[out]: The file name to which the data source belongs + * @param offset[out]: The offset of the data source in the file + * @return: Successful parsing 
returns true, while failure returns false */ static bool ParseCurveChunkPath(const std::string& originPath, std::string* fileName, diff --git a/src/common/net_common.h b/src/common/net_common.h index 8bf058e134..03126b9945 100644 --- a/src/common/net_common.h +++ b/src/common/net_common.h @@ -34,14 +34,14 @@ namespace curve { namespace common { class NetCommon { public: - // addr形式为"ip:port" + // The form of addr is "ip:port" static bool CheckAddressValid(const std::string& addr) { std::string ip; uint32_t port; return SplitAddrToIpPort(addr, &ip, &port); } - // addr形式为"ip:port" + // The form of addr is "ip:port" static bool SplitAddrToIpPort(const std::string& addr, std::string* ipstr, uint32_t* port) { diff --git a/src/common/s3_adapter.cpp b/src/common/s3_adapter.cpp index dba09d1b56..c8ab50d302 100644 --- a/src/common/s3_adapter.cpp +++ b/src/common/s3_adapter.cpp @@ -553,7 +553,7 @@ int S3Adapter::DeleteObjects(const std::list& keyList) { return 0; } /* - // object元数据单独更新还有问题,需要单独的s3接口来支持 +// There are still issues with updating the object metadata separately, and a separate s3 interface is needed to support it int S3Adapter::UpdateObjectMeta(const Aws::String &key, const Aws::Map &meta) { Aws::S3::Model::PutObjectRequest request; diff --git a/src/common/s3_adapter.h b/src/common/s3_adapter.h index a6dc8f6c61..534600b964 100644 --- a/src/common/s3_adapter.h +++ b/src/common/s3_adapter.h @@ -148,16 +148,16 @@ class S3Adapter { } virtual ~S3Adapter() { Deinit(); } /** - * 初始化S3Adapter + * Initialize S3Adapter */ virtual void Init(const std::string &path); /** - * 初始化S3Adapter - * 但不包括 S3InfoOption + * Initialize S3Adapter + * But not including S3InfoOption */ virtual void InitExceptFsS3Option(const std::string &path); /** - * 初始化S3Adapter + * Initialize S3Adapter */ virtual void Init(const S3AdapterOption &option); /** @@ -168,7 +168,7 @@ class S3Adapter { virtual void SetS3Option(const S3InfoOption &fsS3Opt); /** - * 释放S3Adapter资源 + * Release S3Adapter resources */ virtual void Deinit(); /** @@ -192,26 +192,26 @@ class S3Adapter { */ virtual std::string GetS3Endpoint(); /** - * 创建存储快照数据的桶(桶名称由配置文件指定,需要全局唯一) - * @return: 0 创建成功/ -1 创建失败 + * Create a bucket for storing snapshot data (the bucket name is specified by the configuration file and needs to be globally unique) + * @return: 0 successfully created/-1 failed to create */ virtual int CreateBucket(); /** - * 删除桶 - * @return 0 删除成功/-1 删除失败 + * Delete Bucket + * @return 0 deleted successfully/-1 deleted failed */ virtual int DeleteBucket(); /** - * 判断快照数据的桶是否存在 - * @return true 桶存在/ false 桶不存在 + * Determine whether the bucket of snapshot data exists + * @return true bucket exists/false bucket does not exist */ virtual bool BucketExist(); /** - * 上传数据到对象存储 - * @param 对象名 - * @param 数据内容 - * @param 数据内容大小 - * @return:0 上传成功/ -1 上传失败 + * Upload data to object storage + * @param object name + * @param data content + * @param data content size + * @return: 0 Upload successful/-1 Upload failed */ virtual int PutObject(const Aws::String &key, const char *buffer, const size_t bufferSize); @@ -219,10 +219,10 @@ class S3Adapter { // int GetObject(const Aws::String &key, void *buffer, // const int bufferSize); /** - * 上传数据到对象存储 - * @param 对象名 - * @param 数据内容 - * @return:0 上传成功/ -1 上传失败 + * Upload data to object storage + * @param object name + * @param data content + * @return: 0 Upload successful/-1 Upload failed */ virtual int PutObject(const Aws::String &key, const std::string &data); virtual void PutObjectAsync(std::shared_ptr context); @@ 
-238,38 +238,38 @@ class S3Adapter { */ virtual int GetObject(const Aws::String &key, std::string *data); /** - * 从对象存储读取数据 - * @param 对象名 - * @param[out] 返回读取的数据 - * @param 读取的偏移 - * @param 读取的长度 + * Reading data from object storage + * @param object name + * @param[out] returns the read data + * @param read Offset read + * @param The read length read */ virtual int GetObject(const std::string &key, char *buf, off_t offset, size_t len); // NOLINT /** - * @brief 异步从对象存储读取数据 + * @brief asynchronously reads data from object storage * - * @param context 异步上下文 + * @param context asynchronous context */ virtual void GetObjectAsync(std::shared_ptr context); /** - * 删除对象 - * @param 对象名 - * @return: 0 删除成功/ - + * Delete Object + * @param object name + * @return: 0 successfully deleted/- */ virtual int DeleteObject(const Aws::String &key); virtual int DeleteObjects(const std::list &keyList); /** - * 判断对象是否存在 - * @param 对象名 - * @return: true 对象存在/ false 对象不存在 + * Determine whether the object exists + * @param object name + * @return: true object exists/false object does not exist */ virtual bool ObjectExist(const Aws::String &key); /* // Update object meta content - // Todo 接口还有问题 need fix + // There are still issues with the Todo interface, need fix virtual int UpdateObjectMeta(const Aws::String &key, const Aws::Map &meta); // Get object meta content @@ -277,38 +277,38 @@ class S3Adapter { Aws::Map *meta); */ /** - * 初始化对象的分片上传任务 - * @param 对象名 - * @return 任务名 + * Initialize the sharding upload task of the object + * @param object name + * @return Task Name */ virtual Aws::String MultiUploadInit(const Aws::String &key); /** - * 增加一个分片到分片上传任务中 - * @param 对象名 - * @param 任务名 - * @param 第几个分片(从1开始) - * @param 分片大小 - * @param 分片的数据内容 - * @return: 分片任务管理对象 + * Add a shard to the shard upload task + * @param object name + * @param Task Name + * @param Which shard (starting from 1) + * @param shard size + * @param sharded data content + * @return: Fragmented task management object */ virtual Aws::S3::Model::CompletedPart UploadOnePart(const Aws::String &key, const Aws::String &uploadId, int partNum, int partSize, const char *buf); /** - * 完成分片上传任务 - * @param 对象名 - * @param 分片上传任务id - * @管理分片上传任务的vector - * @return 0 任务完成/ -1 任务失败 + * Complete the shard upload task + * @param object name + * @param Partitioning Upload Task ID + * @param Manage vector for sharded upload tasks + * @return 0 task completed/-1 task failed */ virtual int CompleteMultiUpload(const Aws::String &key, const Aws::String &uploadId, const Aws::Vector &cp_v); /** - * 终止一个对象的分片上传任务 - * @param 对象名 - * @param 任务id - * @return 0 终止成功/ -1 终止失败 + * Terminate the sharding upload task of an object + * @param object name + * @param Task ID + * @return 0 Terminated successfully/-1 Terminated failed */ virtual int AbortMultiUpload(const Aws::String &key, const Aws::String &uploadId); @@ -336,14 +336,14 @@ class S3Adapter { }; private: - // S3服务器地址 + // S3 server address Aws::String s3Address_; - // 用于用户认证的AK/SK,需要从对象存储的用户管理中申请 + // AK/SK for user authentication needs to be applied for from user management in object storage Aws::String s3Ak_; Aws::String s3Sk_; - // 对象的桶名 + // The bucket name of the object Aws::String bucketName_; - // aws sdk的配置 + // Configuration of AWS SDK Aws::Client::ClientConfiguration *clientCfg_; Aws::S3::S3Client *s3Client_; Configuration conf_; diff --git a/src/common/snapshotclone/snapshotclone_define.cpp b/src/common/snapshotclone/snapshotclone_define.cpp index b3b08f8d74..4186335b5d 100644 --- 
a/src/common/snapshotclone/snapshotclone_define.cpp +++ b/src/common/snapshotclone/snapshotclone_define.cpp @@ -27,7 +27,7 @@ namespace curve { namespace snapshotcloneserver { -// 字符串常量定义 +// String constant definition const char* kServiceName = "SnapshotCloneService"; const char* kCreateSnapshotAction = "CreateSnapshot"; const char* kDeleteSnapshotAction = "DeleteSnapshot"; diff --git a/src/common/snapshotclone/snapshotclone_define.h b/src/common/snapshotclone/snapshotclone_define.h index ffa5428a6e..2d3902bab7 100644 --- a/src/common/snapshotclone/snapshotclone_define.h +++ b/src/common/snapshotclone/snapshotclone_define.h @@ -29,7 +29,7 @@ namespace curve { namespace snapshotcloneserver { -// snapshotcloneservice字符串常量定义 +// snapshotcloneservice string constant definition extern const char* kServiceName; // action extern const char* kCreateSnapshotAction; @@ -85,54 +85,54 @@ enum class CloneRefStatus { kNeedCheck = 2 }; -// 未初始序列号 +// Uninitialized serial number const uint64_t kUnInitializeSeqNum = 0; -// 初始序列号 +// Initial serial number const uint64_t kInitializeSeqNum = 1; -// 错误码:执行成功 +// Error code: Execution successful const int kErrCodeSuccess = 0; -// 错误码: 内部错误 +// Error code: Internal error const int kErrCodeInternalError = -1; -// 错误码:服务器初始化失败 +// Error code: Server initialization failed const int kErrCodeServerInitFail = -2; -// 错误码:服务器启动失败 +// Error code: Server startup failed const int kErrCodeServerStartFail = -3; -// 错误码:服务已停止 +// Error code: Service stopped const int kErrCodeServiceIsStop = -4; -// 错误码:非法请求 +// Error code: Illegal request const int kErrCodeInvalidRequest = -5; -// 错误码:任务已存在 +// Error code: Task already exists const int kErrCodeTaskExist = -6; -// 错误码:非法的用户 +// Error code: Illegal user const int kErrCodeInvalidUser = -7; -// 错误码:文件不存在 +// Error code: File does not exist const int kErrCodeFileNotExist = -8; -// 错误码:文件状态异常 +// Error code: File status abnormal const int kErrCodeFileStatusInvalid = -9; -// 错误码:chunk大小未按chunk分片大小对齐 +// Error code: Chunk size not aligned with chunk partition size const int kErrCodeChunkSizeNotAligned = -10; -// 错误码:文件名不匹配 +// Error code: File name mismatch const int kErrCodeFileNameNotMatch = -11; -// 错误码: 不能删除未完成的快照 +// Error code: Unable to delete incomplete snapshot const int kErrCodeSnapshotCannotDeleteUnfinished = -12; -// 错误码: 不能对存在异常快照的文件打快照,或不能对存在错误的目标文件克隆/恢复 +// Error code: Cannot take a snapshot of files with abnormal snapshots, or cannot clone/recover target files with errors const int kErrCodeSnapshotCannotCreateWhenError = -13; -// 错误码:取消的快照已完成 +// Error code: Canceled snapshot completed const int kErrCodeCannotCancelFinished = -14; -// 错误码:不能从未完成或存在错误的快照克隆 +// Error code: Cannot clone a snapshot that has never been completed or has errors const int kErrCodeInvalidSnapshot = -15; -// 错误码:不能删除正在克隆的快照 +// Error code: Unable to delete snapshot being cloned const int kErrCodeSnapshotCannotDeleteCloning = -16; -// 错误码:不能清理未完成的克隆 +// Error code: Unable to clean up incomplete clones const int kErrCodeCannotCleanCloneUnfinished = -17; -// 错误码:快照到达上限 +// Error code: The snapshot has reached the upper limit const int kErrCodeSnapshotCountReachLimit = -18; -// 错误码:文件已存在 +// Error code: File already exists const int kErrCodeFileExist = -19; -// 错误码:克隆任务已满 +// Error code: Clone task is full const int kErrCodeTaskIsFull = -20; -// 错误码:不支持 +// Error code: not supported const int kErrCodeNotSupport = -21; extern std::map code2Msg; diff --git a/src/common/stringstatus.h b/src/common/stringstatus.h index 203b851bfc..20a8954d7b 100644 
--- a/src/common/stringstatus.h +++ b/src/common/stringstatus.h @@ -33,15 +33,15 @@ namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs is used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix, prefix + * @param[in] name, first name */ void ExposeAs(const std::string &prefix, const std::string &name); /** - * @brief Set 设置每项key-value信息 + * @brief Set sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,26 +49,26 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update sets the key-value pairs in the current key-value map to status as JSON strings// NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key Specify the key */ std::string GetValueByKey(const std::string &key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody obtains the JSON format string corresponding to the current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common diff --git a/src/common/timeutility.h b/src/common/timeutility.h index 1ba3483d34..beca1339bc 100644 --- a/src/common/timeutility.h +++ b/src/common/timeutility.h @@ -57,7 +57,7 @@ class TimeUtility { return localtime(&now)->tm_hour; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -67,7 +67,7 @@ class TimeUtility { *standard = std::string(now); } - // 时间戳转成标准时间并返回,时间戳单位为秒 + // The timestamp is converted to standard time and returned in seconds static inline std::string TimeStampToStandard(time_t timeStamp) { char now[64]; struct tm p; diff --git a/src/common/uuid.h b/src/common/uuid.h index 8fbc41f61c..fd1cf0c3b8 100644 --- a/src/common/uuid.h +++ b/src/common/uuid.h @@ -27,26 +27,26 @@ extern "C" { void uuid_generate(uuid_t out); void uuid_generate_random(uuid_t out); void uuid_generate_time(uuid_t out); -// 指明由uuid_generate_time生成的uuid是否使用了时间同步机制,不进行封装。 +// Indicate whether the UUID generated by uuid_generate_time utilizes a time synchronization mechanism without encapsulation int uuid_generate_time_safe(uuid_t out); } #include #define BUFF_LEN 36 namespace curve { namespace common { -// 生成uuid的生成器 +// Generator for generating uuid class UUIDGenerator { public: UUIDGenerator() {} /** - * @brief 生成uuid,优先采用的算法 - * 如果存在一个高质量的随机数生成器(/dev/urandom), - * UUID将基于其生成的随机数产生。 - * 备用算法:在高质量的随机数生成器不可用的情况下,如果可以获取到MAC地址, - * 则将利用由随机数生成器产生的随机数、当前时间、MAC地址生成UUID。 + * @brief Generate a UUID with a prioritized algorithm selection + * If a high-quality random number generator (/dev/urandom) is available, + * the UUID will be generated based on the random numbers it generates. + * Backup algorithm: If a high-quality random number generator is not available and if the MAC address can be obtained, + * the UUID will be generated using a combination of random numbers, current time, and the MAC address. 
* @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUID() { uuid_t out; @@ -58,11 +58,11 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 使用全局时钟、MAC地址。有MAC地址泄露风险。为了保证唯一性还使用的时间同步机制, - * 如果,时间同步机制不可用,多台机器上生成的uuid可能会重复。 + * @brief Generate a UUID for the specified purpose. + * It utilizes the global clock and MAC address, but there is a risk of MAC address leakage. To ensure uniqueness, it also employs a time synchronization mechanism. + * However, if the time synchronization mechanism is not available, there is a possibility of UUID duplication when generated on multiple machines. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUIDTime() { uuid_t out; @@ -74,10 +74,10 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 强制完全使用随机数,优先使用(/dev/urandom),备用(伪随机数生成器)。 - * 在使用伪随机数生成器的情况下,uuid有重复的风险。 - * @return 生成的uuid + * @brief Generate a UUID + * Forcefully utilize random numbers, with a preference for (/dev/urandom) and a fallback to pseudo-random number generation. + * When using the pseudo-random number generator, there is a risk of UUID duplication. + * @return The generated UUID */ std::string GenerateUUIDRandom() { uuid_t out; diff --git a/src/common/wait_interval.h b/src/common/wait_interval.h index 69c82143c2..2f3fce02fc 100644 --- a/src/common/wait_interval.h +++ b/src/common/wait_interval.h @@ -30,26 +30,26 @@ namespace common { class WaitInterval { public: /** - * Init 初始化任务的执行间隔 + * Init Execution interval of initialization task * - * @param[in] intervalMs 执行间隔单位是ms + * @param[in] intervalMs The execution interval unit is ms */ void Init(uint64_t intervalMs); /** - * WaitForNextExcution 根据最近一次的执行时间点和周期确定需要等待多久之后再执行 + * WaitForNextExcution determines how long to wait before executing based on the latest execution time and cycle */ void WaitForNextExcution(); /** - * StopWait 退出sleep等待 + * StopWait Exit Sleep Wait */ void StopWait(); private: - // 最近一次的执行时间 + // Last execution time uint64_t lastSend_; - // 任务的执行周期 + // Task execution cycle uint64_t intevalMs_; InterruptibleSleeper sleeper_; diff --git a/src/fs/ext4_filesystem_impl.cpp b/src/fs/ext4_filesystem_impl.cpp index f4cd6cfcdb..903434c8ea 100644 --- a/src/fs/ext4_filesystem_impl.cpp +++ b/src/fs/ext4_filesystem_impl.cpp @@ -79,8 +79,8 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { LOG(INFO) << "System version: " << kernel_info.version; LOG(INFO) << "Machine: " << kernel_info.machine; - // 通过uname获取的版本字符串格式可能为a.b.c-xxx - // a为主版本号,b为此版本号,c为修正号 + // The version string format obtained through uname may be a.b.c-xxx + // A is the main version number, b is the version number, and c is the revision number vector elements; ::curve::common::SplitString(kernel_info.release, "-", &elements); if (elements.size() == 0) { @@ -90,7 +90,7 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { vector numbers; ::curve::common::SplitString(elements[0], ".", &numbers); - // 有些系统可能版本格式前面部分是a.b.c.d,但是a.b.c是不变的 + // Some systems may have a version format with the front part being a.b.c.d, but a.b.c remains unchanged if (numbers.size() < 3) { LOG(ERROR) << "parse kenel version failed."; return false; @@ -103,7 +103,7 @@ bool Ext4FileSystemImpl::CheckKernelVersion() { << ", minor: " << minor << ", revision: " << revision; - // 内核版本必须大于3.15,用于支持renameat2 + // The kernel version must be greater than 3.15 to support renameat2 if (KERNEL_VERSION(major, minor, revision) < MIN_KERNEL_VERSION) { LOG(ERROR) << "Kernel older than 3.15 is not supported."; return false; @@ -157,7 +157,7 
@@ int Ext4FileSystemImpl::Close(int fd) { int Ext4FileSystemImpl::Delete(const string& path) { int rc = 0; - // 如果删除对象是目录的话,需要先删除目录下的子对象 + // If the deleted object is a directory, you need to first delete the sub objects under the directory if (DirExists(path)) { vector names; rc = List(path, &names); @@ -167,7 +167,7 @@ int Ext4FileSystemImpl::Delete(const string& path) { } for (auto &name : names) { string subPath = path + "/" + name; - // 递归删除子对象 + // Recursively delete sub objects rc = Delete(subPath); if (rc < 0) { LOG(WARNING) << "Delete " << subPath << " failed."; @@ -194,13 +194,13 @@ int Ext4FileSystemImpl::Mkdir(const string& dirName) { string path; for (size_t i = 0; i < names.size(); ++i) { - if (0 == i && dirName[0] != '/') // 相对路径 + if (0 == i && dirName[0] != '/') // Relative path path = path + names[i]; else path = path + "/" + names[i]; if (DirExists(path)) continue; - // 目录需要755权限,不然会出现“Permission denied” + // Directory requires 755 permissions, otherwise 'Permission denied' will appear if (posixWrapper_->mkdir(path.c_str(), 0755) < 0) { LOG(WARNING) << "mkdir " << path << " failed. "<< strerror(errno); return -errno; @@ -260,7 +260,7 @@ int Ext4FileSystemImpl::List(const string& dirName, continue; names->push_back(dirIter->d_name); } - // 可能存在其他携程改变了errno,但是只能通过此方式判断readdir是否成功 + // There may be other Ctrip changes to errno, but this is the only way to determine whether readdir is successful if (errno != 0) { LOG(WARNING) << "readdir failed: " << strerror(errno); } @@ -280,7 +280,7 @@ int Ext4FileSystemImpl::Read(int fd, buf + relativeOffset, remainLength, offset); - // 如果offset大于文件长度,pread会返回0 + // If the offset is greater than the file length, pread will return 0 if (ret == 0) { LOG(WARNING) << "pread returns zero." << "offset: " << offset diff --git a/src/fs/local_filesystem.h b/src/fs/local_filesystem.h index 3072867807..e9e992a048 100644 --- a/src/fs/local_filesystem.h +++ b/src/fs/local_filesystem.h @@ -54,77 +54,77 @@ class LocalFileSystem { virtual ~LocalFileSystem() {} /** - * 初始化文件系统 - * 如果文件系统还未格式化,首先会格式化, - * 然后挂载文件系统, - * 已经格式化或者已经挂载的文件系统不会重复格式化或挂载 - * @param option:初始化参数 + * Initialize file system + * If the file system has not been formatted yet, it will be formatted first, + * Then mount the file system, + * Formatted or mounted file systems will not be repeatedly formatted or mounted + * @param option: initialization parameters */ virtual int Init(const LocalFileSystemOption& option) = 0; /** - * 获取文件或目录所在的文件系统状态信息 - * @param path: 要获取的文件系统下的文件或目录路径 - * @param info[out]: 文件系统状态信息 - * @return 成功返回0 + * Obtain the file system status information where the file or directory is located + * @param path: The file or directory path under the file system to obtain + * @param info[out]: File system status information + * @return Successfully returned 0 */ virtual int Statfs(const string& path, struct FileSystemInfo* info) = 0; /** - * 打开文件句柄 - * @param path:文件路径 - * @param flags:操作文件方式的flag - * 此flag使用POSIX文件系统的定义 - * @return 成功返回文件句柄id,失败返回负值 + * Open file handle + * @param path: File path + * @param flags: flags for manipulating file methods + * This flag uses the definition of the POSIX file system + * @return successfully returns the file handle id, while failure returns a negative value */ virtual int Open(const string& path, int flags) = 0; /** - * 关闭文件句柄 - * @param fd: 文件句柄id - * @return 成功返回0 + * Close file handle + * @param fd: file handle id + * @return Successfully returned 0 */ virtual int Close(int fd) = 0; /** - * 删除文件或目录 - * 如果删除对象为目录,会删除目录下的文件或子目录 
- * @param path:文件或目录的路径 - * return 成功返回0 + * Delete files or directories + * If the deleted object is a directory, the files or subdirectories under the directory will be deleted + * @param path: The path to a file or directory + * @return Successful return returns 0 */ virtual int Delete(const string& path) = 0; /** - * 创建目录 - * @param dirPath: 目录路径 - * @return 成功返回0 + * Create directory + * @param dirPath: Directory path + * @return Successfully returned 0 */ virtual int Mkdir(const string& dirPath) = 0; /** - * 判断目录是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine if the directory exists + * @param dirPath: Directory path + * @return returns true, otherwise returns false */ virtual bool DirExists(const string& dirPath) = 0; /** - * 判断文件是否存在 - * @param dirPath:目录路径 - * @return 存在返回true,否则返回false + * Determine if the file exists + * @param dirPath: Directory path + * @return returns true, otherwise returns false */ virtual bool FileExists(const string& filePath) = 0; /** - * 重命名文件/目录 - * 将文件或目录重命名或者移到其他路径,不会覆盖已存在的文件 - * @param oldPath:原文件或目录路径 - * @param newPath:新的文件或目录路径 - * 新的文件或目录在重命名之前不存在,否则返回错误 - * @param flags:重命名使用的模式,默认值为0 - * 可选择RENAME_EXCHANGE、RENAME_EXCHANGE、RENAME_WHITEOUT三种模式 + * Rename File/Directory + * Renaming or moving files or directories to a different path will not overwrite existing files + * @param oldPath: Path to the original file or directory + * @param newPath: New file or directory path + * The new file or directory does not exist before renaming, otherwise an error will be returned + * @param flags: The mode used for renaming, with a default value of 0 + * Optional RENAME_EXCHANGE, RENAME_EXCHANGE, RENAME_WHITEOUT three modes * https://manpages.debian.org/testing/manpages-dev/renameat2.2.en.html - * @return 成功返回0 + * @return Successfully returned 0 */ virtual int Rename(const string& oldPath, const string& newPath, @@ -133,40 +133,40 @@ class LocalFileSystem { } /** - * 列举指定路径下的所有文件和目录名 - * @param dirPath:目录路径 - * @param name[out]:目录下的所有目录和文件名 - * @return 成功返回0 + * List all files and directory names under the specified path + * @param dirPath: Directory path + * @param name[out]: All directories and file names under the directory + * @return Successfully returned 0 */ virtual int List(const string& dirPath, vector* names) = 0; /** - * 从文件指定区域读取数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:接收读取数据的buffer - * @param offset:读取区域的起始偏移 - * @param length:读取数据的长度 - * @return 返回成功读取到的数据长度,失败返回-1 + * Read data from the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buffer: buffer for receiving and reading data + * @param offset: The starting offset of the read area + * @param length: The length of the read data + * @return returns the length of the data successfully read, while failure returns -1 */ virtual int Read(int fd, char* buf, uint64_t offset, int length) = 0; /** - * 向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据的buffer - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buffer: The buffer of the data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the written data + * @return returns the length of successfully written data, while failure returns -1 */ virtual int Write(int fd, const char* buf, uint64_t offset, int length) = 0; /** - * 
向文件指定区域写入数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待写入数据 - * @param offset:写入区域的起始偏移 - * @param length:写入数据的长度 - * @return 返回成功写入的数据长度,失败返回-1 + * Write data to the specified area of the file + * @param fd: File handle id, obtained through the Open interface + * @param buf: Data to be written + * @param offset: The starting offset of the write area + * @param length: The length of the written data + * @return returns the length of successfully written data, while failure returns -1 */ virtual int Write(int fd, butil::IOBuf buf, uint64_t offset, int length) = 0; @@ -181,37 +181,37 @@ class LocalFileSystem { virtual int Sync(int fd) = 0; /** - * 向文件末尾追加数据 - * @param fd:文件句柄id,通过Open接口获取 - * @param buf:待追加数据的buffer - * @param length:追加数据的长度 - * @return 返回成功追加的数据长度,失败返回-1 + * Append data to the end of the file + * @param fd: File handle id, obtained through the Open interface + * @param buffer: buffer for data to be added + * @param length: Append the length of the data + * @return returns the length of successfully added data, while failure returns -1 */ virtual int Append(int fd, const char* buf, int length) = 0; /** - * 文件预分配/挖洞(未实现) - * @param fd:文件句柄id,通过Open接口获取 - * @param op:指定操作类型,预分配还是挖洞 - * @param offset:操作区域的起始偏移 - * @param length:操作区域的长度 - * @return 成功返回0 + * File pre allocation/excavation (not implemented) + * @param fd: File handle id, obtained through the Open interface + * @param op: Specify the type of operation, pre allocation or excavation + * @param offset: The starting offset of the operating area + * @param length: The length of the operation area + * @return Successfully returned 0 */ virtual int Fallocate(int fd, int op, uint64_t offset, int length) = 0; /** - * 获取指定文件状态信息 - * @param fd:文件句柄id,通过Open接口获取 - * @param info[out]:文件系统的信息 - * stat结构同POSIX接口中使用的stat - * @return 成功返回0 + * Obtain specified file status information + * @param fd: File handle id, obtained through the Open interface + * @param info[out]: Information about the file system + * The stat structure is the same as the stat used in the POSIX interface + * @return Successfully returned 0 */ virtual int Fstat(int fd, struct stat* info) = 0; /** - * 将文件数据和元数据刷新到磁盘 - * @param fd:文件句柄id,通过Open接口获取 - * @return 成功返回0 + * Flush file data and metadata to disk + * @param fd: File handle id, obtained through the Open interface + * @return Successfully returned 0 */ virtual int Fsync(int fd) = 0; @@ -225,12 +225,12 @@ class LocalFileSystem { class LocalFsFactory { public: /** - * 创建文件系统对象 - * 本地文件系统的工厂方法,根据传入的类型,创建相应的对象 - * 由该接口创建的文件系统会自动进行初始化 - * @param type:文件系统类型 - * @param deviceID: 设备的编号 - * @return 返回本地文件系统对象指针 + * Creating File System Objects + * The factory method of the local file system creates corresponding objects based on the type passed in + * The file system created by this interface will automatically initialize + * @param type: File system type + * @param deviceID: Device number + * @return returns the local file system object pointer */ static std::shared_ptr CreateFs(FileSystemType type, const std::string& deviceID); diff --git a/src/kvstorageclient/etcd_client.h b/src/kvstorageclient/etcd_client.h index 16aec44e6a..b6f1e4ee19 100644 --- a/src/kvstorageclient/etcd_client.h +++ b/src/kvstorageclient/etcd_client.h @@ -209,7 +209,7 @@ class EtcdClientImp : public KVStorageClient { * leader when the session expired after * client offline. 
* @param[in] electionTimeoutMs the timeout,0 will block always - * @param[out] leaderOid leader的objectId,recorded in objectManager + * @param[out] leaderOid leader's objectId,recorded in objectManager * * @return EtcdErrCode::EtcdCampaignLeaderSuccess success,others fail */ diff --git a/src/leader_election/leader_election.cpp b/src/leader_election/leader_election.cpp index 76884e0b9c..6b87a701d3 100644 --- a/src/leader_election/leader_election.cpp +++ b/src/leader_election/leader_election.cpp @@ -82,7 +82,7 @@ int LeaderElection::ObserveLeader() { // for test fiu_return_on("src/mds/leaderElection/observeLeader", -1); - // 退出当前进程 + // Exit the current process LOG(INFO) << "mds is existing due to the error of leader observation"; raise(SIGTERM); diff --git a/src/leader_election/leader_election.h b/src/leader_election/leader_election.h index 70a28722ec..d2a860409c 100644 --- a/src/leader_election/leader_election.h +++ b/src/leader_election/leader_election.h @@ -37,19 +37,19 @@ using ::curve::kvstorage::EtcdClientImp; using ::curve::common::LEADERCAMPAIGNNPFX; struct LeaderElectionOptions { - // etcd客户端 + // etcd client std::shared_ptr etcdCli; - // 带ttl的session,ttl超时时间内 + // session with ttl, within ttl timeout uint32_t sessionInterSec; - // 竞选leader的超时时间 + // Overtime for running for leader uint32_t electionTimeoutMs; - // leader名称,建议使用ip+port以示区分 + // leader name, it is recommended to use ip+port for differentiation std::string leaderUniqueName; - // 需要竞选的key + // key that need to be contested std::string campaginPrefix; }; @@ -61,24 +61,24 @@ class LeaderElection { } /** - * @brief CampaignLeader 竞选leader + * @brief CampaignLeader * - * @return 0表示竞选成功 -1表示竞选失败 + * @return 0 indicates a successful election, -1 indicates a failed election */ int CampaignLeader(); /** - * @brief StartObserverLeader 启动leader节点监测线程 + * @brief StartObserverLeader starts the leader node monitoring thread */ void StartObserverLeader(); /** - * @brief LeaderResign leader主动卸任leader,卸任成功后其他节点可以竞选leader + * @brief LeaderResign Leader proactively resigns from its leadership position. After successful resignation, other nodes can compete to become the new leader */ int LeaderResign(); /** - * @brief 返回leader name + * @brief returns the leader name */ const std::string& GetLeaderName() { return opt_.leaderUniqueName; @@ -86,8 +86,8 @@ class LeaderElection { public: /** - * @brief ObserveLeader 监测在etcd中创建的leader节点,正常情况下一直block, - * 退出表示leader change或者从client端角度看etcd异常,进程退出 + * @brief Monitor the leader node created in etcd. Under normal circumstances, this function continuously blocks. 
+ * Exiting indicates a leader change or, from the client's perspective, an abnormality in etcd, which leads to process termination */ int ObserveLeader(); @@ -95,10 +95,10 @@ class LeaderElection { // option LeaderElectionOptions opt_; - // realPrefix_ = leader竞选公共prefix + 自定义prefix + // realPrefix_ = leader campaign public prefix + custom prefix std::string realPrefix_; - // 竞选leader之后记录在objectManager中的id号 + // The ID number recorded in the object manager after leader election uint64_t leaderOid_; }; } // namespace election diff --git a/src/mds/nameserver2/clean_core.cpp b/src/mds/nameserver2/clean_core.cpp index 54f743c300..c8a84f39c8 100644 --- a/src/mds/nameserver2/clean_core.cpp +++ b/src/mds/nameserver2/clean_core.cpp @@ -54,9 +54,9 @@ StatusCode CleanCore::CleanSnapShotFile(const FileInfo & fileInfo, LogicalPoolID logicalPoolID = segment.logicalpoolid(); uint32_t chunkNum = segment.chunks_size(); for (uint32_t j = 0; j != chunkNum; j++) { - // 删除快照时如果chunk不存在快照,则需要修改chunk的correctedSn - // 防止删除快照后,后续的写触发chunk的快照 - // correctSn为创建快照后文件的版本号,也就是快照版本号+1 + // When deleting a snapshot, if the chunk does not have a snapshot, the correctedSn of the chunk needs to be modified + // Prevent subsequent writes from triggering Chunk snapshots after deleting snapshots + // CorrectSn is the version number of the file after creating the snapshot, which is the snapshot version number+1 SeqNum correctSn = fileInfo.seqnum() + 1; int ret = copysetClient_->DeleteChunkSnapshotOrCorrectSn( logicalPoolID, diff --git a/src/mds/nameserver2/clean_core.h b/src/mds/nameserver2/clean_core.h index 0cb4f3f8ab..8bc8b372b0 100644 --- a/src/mds/nameserver2/clean_core.h +++ b/src/mds/nameserver2/clean_core.h @@ -48,20 +48,20 @@ class CleanCore { allocStatistic_(allocStatistic) {} /** - * @brief 删除快照文件,更新task状态 - * @param snapShotFile: 需要清理的snapshot文件 - * @param progress: CleanSnapShotFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 + * @brief Delete the snapshot file and update the task status + * @param snapShotFile: The snapshot file that needs to be cleaned + * @param progress: The CleanSnapShotFile interface is a relatively asynchronous task that takes a long time + * Here, progress is transmitted for tracking and feedback */ StatusCode CleanSnapShotFile(const FileInfo & snapShotFile, TaskProgress* progress); /** - * @brief 删除普通文件,更新task状态 - * @param commonFile: 需要清理的普通文件 - * @param progress: CleanFile接口属于时间较长的偏异步任务 - * 这里传入进度进行跟踪反馈 - * @return 是否执行成功,成功返回StatusCode::kOK + * @brief Delete regular files and update task status + * @param commonFile: A regular file that needs to be cleaned + * @param progress: The CleanFile interface is a relatively asynchronous task that takes a long time + * Here, progress is transmitted for tracking and feedback + * @return whether the execution was successful, and if successful, return StatusCode::kOK */ StatusCode CleanFile(const FileInfo & commonFile, TaskProgress* progress); diff --git a/src/mds/nameserver2/clean_manager.h b/src/mds/nameserver2/clean_manager.h index 86dbbd3474..b6a1bd7c52 100644 --- a/src/mds/nameserver2/clean_manager.h +++ b/src/mds/nameserver2/clean_manager.h @@ -56,9 +56,9 @@ class CleanManagerInterface { curve::common::CountDownEvent* counter) = 0; }; /** - * CleanManager 用于异步清理 删除快照对应的数据 - * 1. 接收在线的删除快照请求 - * 2. 线程池异步处理实际的chunk删除任务 + * CleanManager is used for asynchronous cleaning and deleting data corresponding to snapshots. + * 1. Receives online requests for snapshot deletion. + * 2. Asynchronously processes the actual chunk deletion tasks in a thread pool. 
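A hedged sketch of this pattern built on the TaskThreadPool shown earlier in this patch; SnapshotCleaner, its members, and the Enqueue call are illustrative assumptions, not the real CleanManager implementation:

    #include <cstdint>
    #include "src/common/concurrent/task_thread_pool.h"

    // Hypothetical wrapper: the delete request is accepted synchronously,
    // while the actual chunk deletion runs later on a pool worker thread.
    class SnapshotCleaner {
     public:
        SnapshotCleaner() { pool_.Start(4 /* worker threads */); }
        ~SnapshotCleaner() { pool_.Stop(); }

        // Returns immediately; the chunk deletion itself is asynchronous.
        void SubmitDeleteSnapshot(uint64_t inodeId) {
            pool_.Enqueue([inodeId] {
                // ... delete the chunks that belong to inodeId and update progress ...
            });
        }

     private:
        curve::common::TaskThreadPool<> pool_;
    };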
**/ class CleanManager : public CleanManagerInterface { public: diff --git a/src/mds/nameserver2/clean_task.h b/src/mds/nameserver2/clean_task.h index 9001312870..cd5bb89d8c 100644 --- a/src/mds/nameserver2/clean_task.h +++ b/src/mds/nameserver2/clean_task.h @@ -93,7 +93,7 @@ class Task { protected: TaskIDType taskID_; TaskProgress progress_; - // 任务最大重试次数 + // Maximum number of task retries uint32_t retry_; }; diff --git a/src/mds/nameserver2/clean_task_manager.cpp b/src/mds/nameserver2/clean_task_manager.cpp index 2a73ff87b9..f7a80ea355 100644 --- a/src/mds/nameserver2/clean_task_manager.cpp +++ b/src/mds/nameserver2/clean_task_manager.cpp @@ -66,7 +66,7 @@ void CleanTaskManager::CheckCleanResult(void) { } ++iter; } - // clean task为空,清空channelPool + // Clean task is empty, clear channelPool if (cleanTasks_.empty() && notEmptyBefore) { LOG(INFO) << "All tasks completed, clear channel pool"; channelPool_->Clear(); diff --git a/src/mds/nameserver2/clean_task_manager.h b/src/mds/nameserver2/clean_task_manager.h index 9673a0b1c4..8b4393f383 100644 --- a/src/mds/nameserver2/clean_task_manager.h +++ b/src/mds/nameserver2/clean_task_manager.h @@ -43,10 +43,10 @@ namespace mds { class CleanTaskManager { public: /** - * @brief 初始化TaskManager - * @param channelPool: 连接池 - * @param threadNum: worker线程的数量 - * @param checkPeriod: 周期性任务检查线程时间, ms + * @brief Initialize TaskManager + * @param channelPool: Connection Pool + * @param threadNum: Number of worker threads + * @param checkPeriod: Periodic task check thread time, ms */ explicit CleanTaskManager(std::shared_ptr channelPool, int threadNum = 10, int checkPeriod = 10000); @@ -55,28 +55,28 @@ class CleanTaskManager { } /** - * @brief 启动worker线程池、启动检查线程 + * @brief: Start worker thread pool, start check thread * */ bool Start(void); /** - * @brief 停止worker线程池、启动检查线程 + * @brief: Stop worker thread pool, start check thread * */ bool Stop(void); /** - * @brief 向线程池推送task - * @param task: 对应的工作任务 - * @return 推送task是否成功,如已存在对应的任务,推送是吧 + * @brief Push task to thread pool + * @param task: corresponding work task + * @return: Is the task successfully pushed? 
If a corresponding task already exists, the push fails */ bool PushTask(std::shared_ptr task); /** - * @brief 获取当前的task - * @param id: 对应任务的相关文件InodeID - * @return 返回对应task的shared_ptr 或者 不存在返回nullptr + * @brief Get the current task + * @param id: The relevant file InodeID of the corresponding task + * @return the shared_ptr of the corresponding task, or nullptr if it does not exist */ std::shared_ptr GetTask(TaskIDType id); @@ -94,7 +94,7 @@ class CleanTaskManager { Atomic stopFlag_; InterruptibleSleeper sleeper_; - // 连接池,和chunkserverClient共享,没有任务在执行时清空 + // Connection pool, shared with chunkserverClient; cleared when no task is executing std::shared_ptr channelPool_; }; diff --git a/src/snapshotcloneserver/clone/clone_core.cpp b/src/snapshotcloneserver/clone/clone_core.cpp index 2974ed06c8..25a98cf4d3 100644 --- a/src/snapshotcloneserver/clone/clone_core.cpp +++ b/src/snapshotcloneserver/clone/clone_core.cpp @@ -58,7 +58,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, CloneTaskType taskType, std::string poolset, CloneInfo *cloneInfo) { - // 查询数据库中是否有任务正在执行 + //Check whether a task is already executing in the database std::vector cloneInfoList; metaStore_->GetCloneInfoByFileName(destination, &cloneInfoList); bool needJudgeFileExist = false; @@ -78,22 +78,22 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, (info.GetSrc() == source) && (info.GetIsLazy() == lazyFlag) && (info.GetTaskType() == taskType)) { - // 视为同一个clone + //Treat as the same clone *cloneInfo = info; return kErrCodeTaskExist; } else { - // 视为不同的克隆,那么文件实际上已被占用,返回文件已存在 + //Treat it as a different clone: the file is actually occupied, so return that the file already exists return kErrCodeFileExist; } } else if (info.GetStatus() == CloneStatus::done || info.GetStatus() == CloneStatus::error || info.GetStatus() == CloneStatus::metaInstalled) { - // 可能已经删除,需要再判断文件存不存在, - // 在已删除的条件下,允许再克隆 + //It may have been deleted, so check again whether the file exists; + //cloning again is allowed if it has been deleted existCloneInfos.push_back(info); needJudgeFileExist = true; } else { - // 此时,有个相同的克隆任务正在删除中, 返回文件被占用 + //At this point, an identical clone task is being deleted, so return that the file is occupied return kErrCodeFileExist; } } else { // is recover @@ -103,11 +103,11 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, (info.GetSrc() == source) && (info.GetIsLazy() == lazyFlag) && (info.GetTaskType() == taskType)) { - // 视为同一个clone,返回任务已存在 + //Treat as the same clone, return task already exists *cloneInfo = info; return kErrCodeTaskExist; } else { - // 视为不同的克隆,那么文件实际上已被占用,返回文件已存在 + //Treat it as a different clone: the file is actually occupied, so return that the file already exists return kErrCodeFileExist; } } else if (info.GetStatus() == CloneStatus::done || @@ -115,13 +115,13 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, info.GetStatus() == CloneStatus::metaInstalled) { // nothing } else { - // 此时,有个相同的任务正在删除中, 返回文件被占用 + //At this point, an identical task is being deleted, so return that the file is occupied return kErrCodeFileExist; } } } - // 目标文件已存在不能clone, 不存在不能recover + //If the target file already exists it cannot be cloned; if it does not exist it cannot be recovered FInfo destFInfo; int ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); switch (ret) { @@ -129,7 +129,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, if (CloneTaskType::kClone == taskType) { if (needJudgeFileExist) { bool match = false; - // 找出inodeid匹配的cloneInfo + //Find the 
cloneInfo that matches the inodeid for (auto& existInfo : existCloneInfos) { if (destFInfo.id == existInfo.GetDestId()) { *cloneInfo = existInfo; @@ -140,8 +140,8 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, if (match) { return kErrCodeTaskExist; } else { - // 如果没找到,那么dest file都不是这些clone任务创建的, - // 意味着文件重名了 + //If not found, then none of the dest files were created by these clone tasks, + //It means the file has a duplicate name LOG(ERROR) << "Clone dest file exist, " << "but task not match! " << "source = " << source @@ -151,7 +151,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeFileExist; } } else { - // 没有对应的cloneInfo,意味着文件重名了 + //There is no corresponding cloneInfo, which means the file has a duplicate name LOG(ERROR) << "Clone dest file must not exist" << ", source = " << source << ", user = " << user @@ -160,7 +160,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeFileExist; } } else if (CloneTaskType::kRecover == taskType) { - // recover任务,卷的poolset信息不变 + //The recover task keeps the poolset information of the volume unchanged poolset = destFInfo.poolset; } else { assert(false); @@ -183,7 +183,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeInternalError; } - // 是否为快照 + //Is it a snapshot SnapshotInfo snapInfo; CloneFileType fileType; @@ -245,7 +245,7 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, return kErrCodeFileStatusInvalid; } - // TODO(镜像克隆的用户认证待完善) + //TODO (User authentication for mirror cloning to be improved) } UUID uuid = UUIDGenerator().GenerateUUID(); @@ -256,9 +256,9 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, } else { info.SetStatus(CloneStatus::recovering); } - // 这里必须先AddCloneInfo, 因为如果先SetCloneFileStatus,然后AddCloneInfo, - // 如果AddCloneInfo失败又意外重启,将没人知道SetCloneFileStatus调用过,造成 - // 镜像无法删除 + //Here, you must first AddCloneInfo because if you first set CloneFileStatus and then AddCloneInfo, + //If AddCloneInfo fails and unexpectedly restarts, no one will know that SetCloneFileStatus has been called, causing + //Mirror cannot be deleted ret = metaStore_->AddCloneInfo(info); if (ret < 0) { LOG(ERROR) << "AddCloneInfo error" @@ -279,13 +279,13 @@ int CloneCoreImpl::CloneOrRecoverPre(const UUID &source, FileStatus::BeingCloned, mdsRootUser_); if (ret < 0) { - // 这里不处理SetCloneFileStatus的错误, - // 因为SetCloneFileStatus失败的所有结果都是可接受的, - // 相比于处理SetCloneFileStatus失败的情况更直接: - // 比如调用DeleteCloneInfo删除任务, - // 一旦DeleteCloneInfo失败,给用户返回error之后, - // 重启服务将造成Clone继续进行, - // 跟用户结果返回的结果不一致,造成用户的困惑 + //The SetCloneFileStatus error is not handled here, + //Because all results of SetCloneFileStatus failure are acceptable, + //Compared to handling SetCloneFileStatus failure, it is more direct: + //For example, calling DeleteCloneInfo to delete a task, + //Once DeleteCloneInfo fails and an error is returned to the user, + //Restarting the service will cause Clone to continue, + //Inconsistency with the results returned by the user, causing confusion for the user LOG(WARNING) << "SetCloneFileStatus encounter an error" << ", ret = " << ret << ", source = " << source @@ -311,7 +311,7 @@ int CloneCoreImpl::FlattenPre( case CloneStatus::done: case CloneStatus::cloning: case CloneStatus::recovering: { - // 已经完成的或正在进行中返回task exist, 表示不需要处理 + //A task exists is returned for completed or in progress, indicating that it does not need to be processed return kErrCodeTaskExist; } case CloneStatus::metaInstalled: { @@ -362,7 +362,7 @@ void 
CloneCoreImpl::HandleCloneOrRecoverTask( } } - // 在kCreateCloneMeta以后的步骤还需更新CloneChunkInfo信息中的chunkIdInfo + //In the steps after kCreateCloneMeta, it is necessary to update the chunkIdInfo in the CloneChunkInfo information if (NeedUpdateCloneMeta(task)) { ret = CreateOrUpdateCloneMeta(task, &newFileInfo, &segInfos); if (ret < 0) { @@ -504,9 +504,9 @@ int CloneCoreImpl::BuildFileInfoFromSnapshot( << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - // 从快照恢复的destinationId为目标文件的id + //The destinationId recovered from the snapshot is the ID of the target file task->GetCloneInfo().SetDestId(fInfo.id); - // 从快照恢复seqnum+1 + //Restore seqnum+1 from snapshot newFileInfo->seqnum = fInfo.seqnum + 1; } else { newFileInfo->seqnum = kInitializeSeqNum; @@ -574,7 +574,7 @@ int CloneCoreImpl::BuildFileInfoFromFile( << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; } - // GetOrAllocateSegment依赖fullPathName + //GetOrAllocateSegment depends on fullPathName fInfo.fullPathName = source; newFileInfo->chunksize = fInfo.chunksize; @@ -657,7 +657,7 @@ int CloneCoreImpl::CreateCloneFile( const auto& poolset = fInfo.poolset; std::string source = ""; - // 只有从文件克隆才带clone source + //Clone source is only available when cloning from a file if (CloneFileType::kFile == task->GetCloneInfo().GetFileType()) { source = task->GetCloneInfo().GetSrc(); } @@ -692,12 +692,12 @@ int CloneCoreImpl::CreateCloneFile( } task->GetCloneInfo().SetOriginId(fInfoOut.id); if (IsClone(task)) { - // 克隆情况下destinationId = originId; + //In the case of cloning, destinationId = originId; task->GetCloneInfo().SetDestId(fInfoOut.id); } task->GetCloneInfo().SetTime(fInfoOut.ctime); - // 如果是lazy&非快照,先不要createCloneMeta,createCloneChunk - // 等后面stage2阶段recoveryChunk之前去createCloneMeta,createCloneChunk + //If it is a lazy&non snapshot, do not createCloneMeta or createCloneChunk yet + //Wait until stage 2 recoveryChunk, go to createCloneMeta, createCloneChunk if (IsLazy(task) && IsFile(task)) { task->GetCloneInfo().SetNextStep(CloneStep::kCompleteCloneMeta); } else { @@ -742,7 +742,7 @@ int CloneCoreImpl::CreateCloneChunk( int ret = kErrCodeSuccess; uint32_t chunkSize = fInfo.chunksize; uint32_t correctSn = 0; - // 克隆时correctSn为0,恢复时为新产生的文件版本 + //When cloning, correctSn is 0, and when restoring, it is the newly generated file version if (IsClone(task)) { correctSn = 0; } else { @@ -790,13 +790,13 @@ int CloneCoreImpl::CreateCloneChunk( } } } - // 最后剩余数量不足的任务 + //Tasks with insufficient remaining quantity in the end do { tracker->WaitSome(1); std::list results = tracker->PopResultContexts(); if (0 == results.size()) { - // 已经完成,没有新的结果了 + //Completed, no new results break; } ret = HandleCreateCloneChunkResultsAndRetry(task, tracker, results); @@ -954,13 +954,13 @@ int CloneCoreImpl::RecoverChunk( auto tracker = std::make_shared(); uint64_t workingChunkNum = 0; - // 为避免发往同一个chunk碰撞,异步请求不同的chunk + //To avoid collisions with the same chunk, asynchronous requests for different chunks for (auto & cloneSegmentInfo : segInfos) { for (auto & cloneChunkInfo : cloneSegmentInfo.second) { if (!cloneChunkInfo.second.needRecover) { continue; } - // 当前并发工作的chunk数已大于要求的并发数时,先消化一部分 + //When the current number of chunks for concurrent work exceeds the required number of concurrent tasks, digest a portion first while (workingChunkNum >= recoverChunkConcurrency_) { uint64_t completeChunkNum = 0; ret = ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd(task, @@ -971,7 +971,7 @@ int CloneCoreImpl::RecoverChunk( } workingChunkNum -= 
completeChunkNum; } - // 加入新的工作的chunk + //Add a new chunk to the working set workingChunkNum++; auto context = std::make_shared(); context->cidInfo = cloneChunkInfo.second.chunkIdInfo; @@ -1086,7 +1086,7 @@ int CloneCoreImpl::ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( return context->retCode; } } else { - // 启动一个新的分片,index++,并重置开始时间 + //Start a new shard, index++, and reset the start time context->partIndex++; context->startTime = TimeUtility::GetTimeofDaySec(); if (context->partIndex < context->totalPartNum) { @@ -1147,14 +1147,14 @@ int CloneCoreImpl::RenameCloneFile( cloneTempDir_ + "/" + task->GetCloneInfo().GetTaskId(); std::string destination = task->GetCloneInfo().GetDest(); - // 先rename + //Rename first int ret = client_->RenameCloneFile(mdsRootUser_, originId, destinationId, origin, destination); if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 有可能是已经rename过了 + //It is possible that it has already been renamed FInfo destFInfo; ret = client_->GetFileInfo(destination, mdsRootUser_, &destFInfo); if (ret != LIBCURVE_ERROR::OK) { @@ -1442,7 +1442,7 @@ bool CloneCoreImpl::NeedRetry(std::shared_ptr task, if (CloneStep::kRecoverChunk == step || CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step) { - // 文件不存在的场景下不需要再重试,因为可能已经被删除了 + //In scenarios where the file does not exist, there is no need to retry as it may have been deleted if (retCode != kErrCodeFileNotExist) { return true; } @@ -1463,7 +1463,7 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( if (LIBCURVE_ERROR::OK == ret) { // nothing } else if (-LIBCURVE_ERROR::NOTEXIST == ret) { - // 可能已经rename过了 + //Perhaps it has already been renamed newFileName = task->GetCloneInfo().GetDest(); ret = client_->GetFileInfo(newFileName, mdsRootUser_, &fInfoOut); if (ret != LIBCURVE_ERROR::OK) { @@ -1474,7 +1474,7 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( << ", taskid = " << task->GetTaskId(); return kErrCodeFileNotExist; } - // 如果是已经rename过,那么id应该一致 + //If it has already been renamed, then the id should be consistent uint64_t originId = task->GetCloneInfo().GetOriginId(); if (fInfoOut.id != originId) { LOG(ERROR) << "File is missing, fileId not equal, " @@ -1493,9 +1493,9 @@ int CloneCoreImpl::CreateOrUpdateCloneMeta( << ", taskid = " << task->GetTaskId(); return kErrCodeInternalError; } - // 更新fInfo + //Update fInfo *fInfo = fInfoOut; - // GetOrAllocateSegment 依赖fullPathName,需要在此处更新 + //GetOrAllocateSegment depends on fullPathName and needs to be updated here fInfo->fullPathName = newFileName; uint32_t segmentSize = fInfo->segmentsize; @@ -1540,7 +1540,7 @@ int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user, CloneInfo *cloneInfo) { int ret = metaStore_->GetCloneInfo(taskId, cloneInfo); if (ret < 0) { - // 不存在时直接返回成功,使接口幂等 + //Return success directly when it does not exist, making the interface idempotent return kErrCodeSuccess; } if (cloneInfo->GetUser() != user) { @@ -1576,11 +1576,11 @@ int CloneCoreImpl::CleanCloneOrRecoverTaskPre(const std::string &user, void CloneCoreImpl::HandleCleanCloneOrRecoverTask( std::shared_ptr task) { - // 只有错误的clone/recover任务才清理临时文件 + //Only failed clone/recover tasks clean up temporary files if (CloneStatus::errorCleaning == task->GetCloneInfo().GetStatus()) { - // 错误情况下可能未清除镜像被克隆标志 + //In the error case, the image's being-cloned flag may not have been cleared if (IsFile(task)) { - // 重新发送 + //Resend std::string source = task->GetCloneInfo().GetSrc(); NameLockGuard lockGuard(cloneRef_->GetLock(), source); if (cloneRef_->GetRef(source) == 0) { @@ -1668,10 +1668,10 @@ int 
CloneCoreImpl::CheckFileExists(const std::string &filename, return kErrCodeInternalError; } -// 加减引用计数的时候,接口里面会对引用计数map加锁; -// 加引用计数、处理引用计数减到0的时候,需要额外对修改的那条记录加锁。 +//When adding or subtracting reference counts, the interface will lock the reference count map; +//When adding a reference count and reducing the reference count to 0, an additional lock needs to be added to the modified record. int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { - // 先减引用计数,如果是从镜像克隆且引用计数减到0,需要修改源镜像的状态为created + //First, reduce the reference count. If you are cloning from a mirror and the reference count is reduced to 0, you need to modify the status of the source mirror to 'created' std::string source = cloneInfo.GetSrc(); if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { snapshotRef_->DecrementSnapshotRef(source); @@ -1694,7 +1694,7 @@ int CloneCoreImpl::HandleDeleteCloneInfo(const CloneInfo &cloneInfo) { } } - // 删除这条记录,如果删除失败,把前面已经减掉的引用计数加回去 + //Delete this record. If the deletion fails, add back the previously subtracted reference count int ret = metaStore_->DeleteCloneInfo(cloneInfo.GetTaskId()); if (ret != 0) { if (cloneInfo.GetFileType() == CloneFileType::kSnapshot) { diff --git a/src/snapshotcloneserver/clone/clone_core.h b/src/snapshotcloneserver/clone/clone_core.h index 19c1c20c9d..d1aa8b460b 100644 --- a/src/snapshotcloneserver/clone/clone_core.h +++ b/src/snapshotcloneserver/clone/clone_core.h @@ -51,17 +51,17 @@ class CloneCore { virtual ~CloneCore() {} /** - * @brief 克隆或恢复任务前置 + * @brief Clone or restore task ahead * - * @param source 克隆或恢复源 - * @param user 用户名 - * @param destination 克隆或恢复的目标文件名 - * @param lazyFlag 是否lazy - * @param taskType 克隆或恢复 - * @param poolset 克隆时目标文件的poolset - * @param[out] info 克隆或恢复任务信息 + * @param source Clone or restore source + * @param user username + * @param destination The target file name for cloning or restoring + * @param lazyFlag is lazy + * @param taskType clone or restore + * @param poolset The poolset of the target file during cloning + * @param[out] info Clone or restore task information * - * @return 错误码 + * @return error code */ virtual int CloneOrRecoverPre(const UUID &source, const std::string &user, @@ -72,21 +72,21 @@ class CloneCore { CloneInfo *info) = 0; /** - * @brief 处理克隆或恢复任务 + * @brief Processing cloning or recovery tasks * - * @param task 克隆或恢复任务 + * @param task Clone or restore task */ virtual void HandleCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 清理克隆或恢复任务前置 + * @brief Clean clone or restore tasks ahead * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆或恢复信息 + * @param user username + * @param taskId Task Id + * @param[out] cloneInfo Clone or restore information * - * @return 错误码 + * @return error code */ virtual int CleanCloneOrRecoverTaskPre(const std::string &user, const TaskIdType &taskId, @@ -94,24 +94,24 @@ class CloneCore { /** - * @brief 异步处理清理克隆或恢复任务 + * @brief Asynchronous processing of clean clone or restore tasks * - * @param task 克隆或恢复任务 + * @param task Clone or restore task */ virtual void HandleCleanCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 安装克隆文件数据的前置工作 - * - 进行一些必要的检查 - * - 获取并返回克隆信息 - * - 更新数据库状态 + * @brief Pre work for installing clone file data + * - Conduct necessary inspections + * - Obtain and return clone information + * - Update database status * - * @param user 用户名 - * @param taskId 任务Id - * @param[out] cloneInfo 克隆信息 + * @param user username + * @param taskId Task Id + * @param[out] cloneInfo clone information * - * @return 错误码 + 
* @return error code */ virtual int FlattenPre( const std::string &user, @@ -119,100 +119,100 @@ class CloneCore { CloneInfo *cloneInfo) = 0; /** - * @brief 获取全部克隆/恢复任务列表,用于重启后恢复执行 + * @brief Get a list of all clone/restore tasks for resuming execution after reboot * - * @param[out] cloneInfos 克隆/恢复任务列表 + * @param[out] cloneInfos Clone/Restore Task List * - * @return 错误码 + * @return error code */ virtual int GetCloneInfoList(std::vector *cloneInfos) = 0; /** - * @brief 获取指定id的克隆/恢复任务 + * @brief Get the clone/restore task for the specified ID * - * @param taskId  任务id - * @param cloneInfo 克隆/恢复任务 + * @param taskId Task ID + * @param cloneInfo Clone/Restore Task * - * @retVal 0 获取成功 - * @retVal -1 获取失败 + * @retval 0 successfully obtained + * @retval -1 failed to obtain */ virtual int GetCloneInfo(TaskIdType taskId, CloneInfo *cloneInfo) = 0; /** - * @brief 获取指定文件名的克隆/恢复任务 + * @brief Get the clone/restore task for the specified file name * - * @param fileName  文件名 - * @param list 克隆/恢复任务列表 + * @param fileName File name + * @param list Clone/Restore Task List * - * @retVal 0 获取成功 - * @retVal -1 获取失败 + * @retval 0 successfully obtained + * @retval -1 failed to obtain */ virtual int GetCloneInfoByFileName( const std::string &fileName, std::vector *list) = 0; /** - * @brief 获取快照引用管理模块 + * @brief Get snapshot reference management module * - * @return 快照引用管理模块 + * @return Snapshot Reference Management Module */ virtual std::shared_ptr GetSnapshotRef() = 0; /** - * @brief 获取镜像引用管理模块 + * @brief Get Mirror Reference Management Module * - * @return 镜像引用管理模块 + * @return Image Reference Management Module */ virtual std::shared_ptr GetCloneRef() = 0; /** - * @brief 移除克隆/恢复任务 + * @brief Remove clone/restore task * - * @param task 克隆任务 + * @param task Clone task * - * @return 错误码 + * @return error code */ virtual int HandleRemoveCloneOrRecoverTask( std::shared_ptr task) = 0; /** - * @brief 检查文件是否存在 + * @brief Check if the file exists * - * @param filename 文件名 + * @param filename File name * - * @return 错误码 + * @return error code */ virtual int CheckFileExists(const std::string &filename, uint64_t inodeId) = 0; /** - * @brief 删除cloneInfo + * @brief Delete cloneInfo * - * @param cloneInfo 待删除的cloneInfo + * @param cloneInfo CloneInfo to be deleted * - * @return 错误码 + * @return error code */ virtual int HandleDeleteCloneInfo(const CloneInfo &cloneInfo) = 0; }; /** - * @brief 克隆/恢复所需chunk信息 + * @brief Chunk information required for cloning/restoring */ struct CloneChunkInfo { - // 该chunk的id信息 + // The ID information of the chunk ChunkIDInfo chunkIdInfo; - // 位置信息,如果在s3上,是objectName,否则在curvefs上,则是offset + // Location information, if on s3, it is objectName, otherwise on curves, it is offset std::string location; - // 该chunk的版本号 + // The version number of the chunk uint64_t seqNum; - // chunk是否需要recover + // Does Chunk require recover bool needRecover; }; -// 克隆/恢复所需segment信息,key是ChunkIndex In Segment, value是chunk信息 +// The segment information required for cloning/recovery, where key is ChunkIndex In Segment and value is chunk information using CloneSegmentInfo = std::map; -// 克隆/恢复所需segment信息表,key是segmentIndex +// The segment information table required for cloning/recovery, where the key is segmentIndex using CloneSegmentMap = std::map; class CloneCoreImpl : public CloneCore { @@ -291,13 +291,13 @@ class CloneCoreImpl : public CloneCore { private: /** - * @brief 从快照构建克隆/恢复的文件信息 + * @brief Build clone/restore file information from snapshot * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] 
segInfos 新构建文件的segment信息 + * @param task task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed file * - * @return 错误码 + * @return error code */ int BuildFileInfoFromSnapshot( std::shared_ptr task, @@ -305,13 +305,13 @@ class CloneCoreImpl : public CloneCore { CloneSegmentMap *segInfos); /** - * @brief 从源文件构建克隆/恢复的文件信息 + * @brief Build clone/restore file information from source files * - * @param task 任务信息 - * @param[out] newFileInfo 新构建的文件信息 - * @param[out] segInfos 新构建文件的segment信息 + * @param task task information + * @param[out] newFileInfo Newly constructed file information + * @param[out] segInfos The segment information of the newly constructed file * - * @return 错误码 + * @return error code */ int BuildFileInfoFromFile( std::shared_ptr task, @@ -320,36 +320,36 @@ class CloneCoreImpl : public CloneCore { /** - * @brief 判断是否需要更新CloneChunkInfo信息中的chunkIdInfo + * @brief to determine if it is necessary to update chunkIdInfo in CloneChunkInfo information * - * @param task 任务信息 + * @param task task information * - * @retVal true 需要更新 - * @retVal false 不需要更新 + * @retval true needs to be updated + * @retval false No update required */ bool NeedUpdateCloneMeta( std::shared_ptr task); /** - * @brief 判断clone失败后是否需要重试 + * @brief: Determine whether to retry after clone failure * - * @param task 任务信息 - * @param retCode 错误码 + * @param task task information + * @param retCode error code * - * @retVal true 需要 - * @retVal false 不需要 + * @retval true requires + * @retval false No need */ bool NeedRetry(std::shared_ptr task, int retCode); /** - * @brief 创建clone的元数据信息或更新元数据信息 + * @brief Create metadata information for clone or update metadata information * - * @param task 任务信息 - * @param[int][out] fInfo 新创建的文件信息 - * @param[int][out] segInfos 文件的segment信息 + * @param task task information + * @param[int][out] fInfo Newly created file information + * @param[int][out] segInfosThe segment information of the file * - * @return 错误码 + * @return error code */ int CreateOrUpdateCloneMeta( std::shared_ptr task, @@ -357,25 +357,25 @@ class CloneCoreImpl : public CloneCore { CloneSegmentMap *segInfos); /** - * @brief 创建新clone文件 + * @brief Create a new clone file * - * @param task 任务信息 - * @param fInfo 需创建的文件信息 + * @param task task information + * @param fInfo File information to be created * - * @return 错误码 + * @return error code */ int CreateCloneFile( std::shared_ptr task, const FInfo &fInfo); /** - * @brief 创建新文件的源信息(创建segment) + * @brief Create source information for new files (create segments) * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ int CreateCloneMeta( std::shared_ptr task, @@ -383,13 +383,13 @@ class CloneCoreImpl : public CloneCore { CloneSegmentMap *segInfos); /** - * @brief 创建新clone文件的chunk + * @brief Create a chunk for a new clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ int CreateCloneChunk( std::shared_ptr task, @@ -397,13 +397,13 @@ class CloneCoreImpl : public CloneCore { CloneSegmentMap *segInfos); /** - * @brief 开始CreateCloneChunk的异步请求 
+ * @brief Start asynchronous request for CreateCloneChunk * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param context CreateCloneChunk上下文 + * @param task task information + * @param tracker CreateCloneChunk Task Tracker + * @param context CreateCloneChunk context * - * @return 错误码 + * @return error code */ int StartAsyncCreateCloneChunk( std::shared_ptr task, @@ -411,13 +411,13 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 处理CreateCloneChunk的结果并重试 + * @brief Process the results of CreateCloneChunk and try again * - * @param task 任务信息 - * @param tracker CreateCloneChunk任务追踪器 - * @param results CreateCloneChunk结果列表 + * @param task task information + * @param tracker CreateCloneChunk Task Tracker + * @param results CreateCloneChunk result list * - * @return 错误码 + * @return error code */ int HandleCreateCloneChunkResultsAndRetry( std::shared_ptr task, @@ -425,13 +425,13 @@ class CloneCoreImpl : public CloneCore { const std::list &results); /** - * @brief 通知mds完成源数据创建步骤 + * @brief Notify mds to complete the step of creating source data * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ int CompleteCloneMeta( std::shared_ptr task, @@ -439,13 +439,13 @@ class CloneCoreImpl : public CloneCore { const CloneSegmentMap &segInfos); /** - * @brief 恢复chunk,即通知chunkserver拷贝数据 + * @brief Restore Chunk, that is, notify Chunkserver to copy data * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ int RecoverChunk( std::shared_ptr task, @@ -453,13 +453,13 @@ class CloneCoreImpl : public CloneCore { const CloneSegmentMap &segInfos); /** - * @brief 开始RecoverChunk的异步请求 + * @brief Start asynchronous request for RecoverChunk * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪器 - * @param context RecoverChunk上下文 + * @param task task information + * @param tracker RecoverChunk Asynchronous task tracker + * @param context RecoverChunk Context * - * @return 错误码 + * @return error code */ int StartAsyncRecoverChunkPart( std::shared_ptr task, @@ -467,13 +467,13 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr context); /** - * @brief 继续RecoverChunk的其他部分的请求以及等待完成某些RecoverChunk + * @brief Continue requests for other parts of the RecoverChunk and wait for certain RecoverChunks to be completed * - * @param task 任务信息 - * @param tracker RecoverChunk异步任务跟踪者 - * @param[out] completeChunkNum 完成的chunk数 + * @param task task information + * @param tracker RecoverChunk Asynchronous task tracker + * @param[out] completeChunkNum Number of chunks completed * - * @return 错误码 + * @return error code */ int ContinueAsyncRecoverChunkPartAndWaitSomeChunkEnd( std::shared_ptr task, @@ -481,37 +481,37 @@ class CloneCoreImpl : public CloneCore { uint64_t *completeChunkNum); /** - * @brief 修改克隆文件的owner + * @brief Modify the owner of the cloned file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information for new files * - * @return 错误码 + * @return error code */ int ChangeOwner( std::shared_ptr task, const FInfo &fInfo); /** - * @brief 重命名克隆文件 + * @brief Rename 
clone file * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 + * @param task task information + * @param fInfo File information for new files * - * @return 错误码 + * @return error code */ int RenameCloneFile( std::shared_ptr task, const FInfo &fInfo); /** - * @brief 通知mds完成数据创建 + * @brief Notify mds to complete data creation * - * @param task 任务信息 - * @param fInfo 新文件的文件信息 - * @param segInfos 新文件所需的segment信息 + * @param task task information + * @param fInfo File information for new files + * @param segInfos The segment information required for a new file * - * @return 错误码 + * @return error code */ int CompleteCloneFile( std::shared_ptr task, @@ -519,58 +519,58 @@ class CloneCoreImpl : public CloneCore { const CloneSegmentMap &segInfos); /** - * @brief 从快照克隆时,更新快照状态,通知克隆完成 + * @brief When cloning from a snapshot, update the snapshot status and notify that the clone is complete * - * @param task 任务信息 + * @param task task information * - * @return 错误码 + * @return error code */ int UpdateSnapshotStatus( std::shared_ptr task); /** - * @brief 处理Lazy克隆/恢复阶段一结束 + * @brief Handle the end of phase one of a lazy clone/restore * - * @param task 任务信息 + * @param task task information */ void HandleLazyCloneStage1Finish( std::shared_ptr task); /** - * @brief 处理克隆/恢复成功 + * @brief Handle clone/restore success * - * @param task 任务信息 + * @param task task information */ void HandleCloneSuccess(std::shared_ptr task); /** - * @brief 处理克隆或恢复失败 + * @brief Handle clone or restore failure * - * @param task 任务信息 - * @param retCode 待处理的错误码 + * @param task task information + * @param retCode error code to be handled */ void HandleCloneError(std::shared_ptr task, int retCode); /** - * @brief Lazy Clone 情况下处理Clone任务失败重试 + * @brief Handle the failure-and-retry of a clone task in the lazy clone case * - * @param task 任务信息 + * @param task task information */ void HandleCloneToRetry(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务成功 + * @brief Handle success of a cleanup clone or restore task * - * @param task 任务信息 + * @param task task information */ void HandleCleanSuccess(std::shared_ptr task); /** - * @brief 处理清理克隆或恢复任务失败 + * @brief Handle failure of a cleanup clone or restore task * - * @param task 任务信息 + * @param task task information */ void HandleCleanError(std::shared_ptr task); @@ -587,19 +587,19 @@ class CloneCoreImpl : public CloneCore { std::shared_ptr snapshotRef_; std::shared_ptr cloneRef_; - // clone chunk分片大小 + // Clone chunk shard size uint64_t cloneChunkSplitSize_; - // 克隆临时目录 + // Clone temporary directory std::string cloneTempDir_; // mds root user std::string mdsRootUser_; - // CreateCloneChunk同时进行的异步请求数量 + // Number of asynchronous requests made simultaneously by CreateCloneChunk uint32_t createCloneChunkConcurrency_; - // RecoverChunk同时进行的异步请求数量 + // Number of asynchronous requests simultaneously made by RecoverChunk uint32_t recoverChunkConcurrency_; - // client异步请求重试时间 + // Client asynchronous request retry time uint64_t clientAsyncMethodRetryTimeSec_; - // 调用client异步方法重试时间间隔 + // Call client asynchronous method retry interval uint64_t clientAsyncMethodRetryIntervalMs_; }; diff --git a/src/snapshotcloneserver/clone/clone_service_manager.cpp b/src/snapshotcloneserver/clone/clone_service_manager.cpp index 9b7439fecf..9d2c60e130 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_service_manager.cpp @@ -67,7 +67,7 @@ int CloneServiceManager::CloneFile(const UUID &source, bool lazyFlag, std::shared_ptr closure, TaskIdType *taskId) { - // 加锁防止并发 + // Locking to prevent 
concurrency NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); @@ -79,7 +79,7 @@ int CloneServiceManager::CloneFile(const UUID &source, CloneTaskType::kClone, poolset, &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); @@ -112,7 +112,7 @@ int CloneServiceManager::RecoverFile(const UUID &source, bool lazyFlag, std::shared_ptr closure, TaskIdType *taskId) { - // 加锁防止并发 + // Locking to prevent concurrency NameLockGuard lockDestFileGuard(*destFileLock_, destination); brpc::ClosureGuard guard(closure.get()); closure->SetDestFileLock(destFileLock_); @@ -124,7 +124,7 @@ int CloneServiceManager::RecoverFile(const UUID &source, CloneTaskType::kRecover, "", &cloneInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface idempotent *taskId = cloneInfo.GetTaskId(); closure->SetTaskId(*taskId); closure->SetErrCode(kErrCodeSuccess); @@ -546,7 +546,7 @@ int CloneServiceManager::GetFinishedCloneTask( LOG(ERROR) << "can not reach here!" << " status = " << static_cast( newInfo.GetStatus()); - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is possible to enter here return kErrCodeInternalError; } return kErrCodeSuccess; @@ -592,11 +592,11 @@ int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { cloneInfo.GetTaskId(), taskInfo, cloneCore_); bool isLazy = cloneInfo.GetIsLazy(); int ret = kErrCodeSuccess; - // Lazy 克隆/恢复 + // Lazy Clone/Restore if (isLazy) { CloneStep step = cloneInfo.GetNextStep(); - // 处理kRecoverChunk,kCompleteCloneFile,kEnd这三个阶段的Push到stage2Pool - // 如果克隆source类型是file,阶段为kCreateCloneChunk和kCreateCloneMeta也需要push到stage2Pool // NOLINT + // Process the Push to stage2Pool for the three stages of kRecoverChunk,kCompleteCloneFile, and kEnd + // If the clone source type is file and the stages are kCreateCloneChunk and kCreateCloneMeta, they also need to be pushed to stage2Pool// NOLINT if (CloneStep::kRecoverChunk == step || CloneStep::kCompleteCloneFile == step || CloneStep::kEnd == step || @@ -610,9 +610,9 @@ int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { << ", ret = " << ret; return ret; } - // 否则push到stage1Pool + // Otherwise, push to stage1Pool } else { - // stage1的task包含了异步的请求的返回,需要加锁 + // The task of stage1 contains the return of asynchronous requests that require locking std::string destination = cloneInfo.GetDest(); NameLockGuard lockDestFileGuard(*destFileLock_, destination); closure->SetDestFileLock(destFileLock_); @@ -625,7 +625,7 @@ int CloneServiceManager::RecoverCloneTaskInternal(const CloneInfo &cloneInfo) { return ret; } } - // 非Lazy 克隆/恢复push到commonPool + // Non Lazy clone/restore push to commonPool } else { ret = cloneTaskMgr_->PushCommonTask(task); if (ret < 0) { @@ -663,7 +663,7 @@ int CloneServiceManager::RecoverCloneTask() { for (auto &cloneInfo : list) { switch (cloneInfo.GetStatus()) { case CloneStatus::retrying: { - // 重置重试任务的状态 + // Reset the status of the retry task if (cloneInfo.GetTaskType() == CloneTaskType::kClone) { cloneInfo.SetStatus(CloneStatus::cloning); } else { @@ -673,7 +673,7 @@ int CloneServiceManager::RecoverCloneTask() { FALLTHROUGH_INTENDED; case CloneStatus::cloning: case 
CloneStatus::recovering: { - // 建立快照或镜像的引用关系 + // Establishing a reference relationship for a snapshot or mirror if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); @@ -696,7 +696,7 @@ int CloneServiceManager::RecoverCloneTask() { break; } case CloneStatus::metaInstalled: { - // metaInstalled 状态下的克隆对文件仍然有依赖,需要建立引用关系 + // Clones in MetaInstalled state still have dependencies on files and need to establish a reference relationship if (CloneFileType::kSnapshot == cloneInfo.GetFileType()) { cloneCore_->GetSnapshotRef()->IncrementSnapshotRef( cloneInfo.GetSrc()); @@ -713,10 +713,10 @@ int CloneServiceManager::RecoverCloneTask() { return kErrCodeSuccess; } -// 当clone处于matainstall状态,且克隆卷已经删除的情况下,原卷的引用计数没有减。 -// 这个后台线程处理函数周期性的检查这个场景,如果发现有clone处于metaintalled状态 -// 且克隆卷已经删除,就去删除这条无效的clone信息,并减去原卷的引用计数。 -// 如果原卷是镜像且引用计数减为0,还需要去mds把原卷的状态改为created。 +// When the clone is in a matainstall state and the clone volume has been deleted, the reference count of the original volume does not decrease. +// This backend thread processing function periodically checks this scenario, and if any clones are found to be in the 'metaled' state +// If the clone volume has been deleted, delete the invalid clone information and subtract the reference count of the original volume. +// If the original volume is a mirror and the reference count is reduced to 0, it is necessary to go to MDS to change the status of the original volume to created. void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc start"; while (!isStop_.load()) { @@ -730,28 +730,28 @@ void CloneServiceManagerBackendImpl::Func() { for (auto &it : cloneInfos) { if (it.GetStatus() == CloneStatus::metaInstalled && it.GetIsLazy() == true) { - // 检查destination在不在 + // Check if the destination is available if (it.GetTaskType() == CloneTaskType::kClone) { ret = cloneCore_->CheckFileExists(it.GetDest(), it.GetDestId()); } else { - // rename时,inodeid恢复成 + // When renaming, the inodeid is restored to ret = cloneCore_->CheckFileExists(it.GetDest(), it.GetOriginId()); } if (ret == kErrCodeFileNotExist) { - // 如果克隆卷是metaInstalled状态,且destination文件不存在, - // 删除这条cloneInfo,并减引用计数 + // If the cloned volume is in a metaInstalled state and the destination file does not exist, + // Delete this cloneInfo and subtract the reference count TaskIdType taskId = it.GetTaskId(); CloneInfo cloneInfo; ret = cloneCore_->GetCloneInfo(taskId, &cloneInfo); if (ret != kErrCodeSuccess) { - // cloneInfo已经不存在了 + // CloneInfo no longer exists continue; } - // 再次检查cloneInfo是否是metaInstalled状态 + // Check again if cloneInfo is in the metaInstalled state if (cloneInfo.GetStatus() != CloneStatus::metaInstalled) { continue; } @@ -771,7 +771,7 @@ void CloneServiceManagerBackendImpl::Func() { LOG(INFO) << "backend scan list, size = " << cloneInfos.size() << ", delete clone record count = " << deleteCount; - // 控制每轮扫描间隔 + // Control the scanning interval of each round roundWaitInterval_.WaitForNextExcution(); } LOG(INFO) << "CloneServiceManager BackEndReferenceScanFunc exit"; diff --git a/src/snapshotcloneserver/clone/clone_service_manager.h b/src/snapshotcloneserver/clone/clone_service_manager.h index 0cd66e9d09..83fa99f242 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.h +++ b/src/snapshotcloneserver/clone/clone_service_manager.h @@ -161,7 +161,7 @@ class CloneServiceManagerBackend { virtual ~CloneServiceManagerBackend() {} /** - * @brief 后台扫描线程执行函数,扫描克隆卷是否存在 + 
* @brief Background scan thread execution function to scan for the existence of cloned volumes * */ virtual void Func() = 0; @@ -191,13 +191,13 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { private: std::shared_ptr cloneCore_; - // 后台扫描线程,扫描clone卷是否存在 + //Background scan thread to check if clone volume exists std::thread backEndReferenceScanThread_; - // 当前后台扫描是否停止,用于支持start,stop功能 + //Is the current background scanning stopped? Used to support start and stop functions std::atomic_bool isStop_; - // 后台扫描线程记录使用定时器 + //Using a timer for background scanning thread records common::WaitInterval recordWaitInterval_; - // 后台扫描线程每轮使用定时器 + //The backend scanning thread uses a timer for each round common::WaitInterval roundWaitInterval_; }; @@ -215,36 +215,36 @@ class CloneServiceManager { virtual ~CloneServiceManager() {} /** - * @brief 初始化 + * @brief initialization * - * @return 错误码 + * @return error code */ virtual int Init(const SnapshotCloneServerOptions &option); /** - * @brief 启动服务 + * @brief Start Service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ virtual void Stop(); /** - * @brief 从文件或快照克隆出一个文件 + * @brief Clone a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source Uuid of file or snapshot + * @param user The user of the file or snapshot + * @param destination destination destination file + * @param lazyFlag Is in lazy mode + * @param closure asynchronous callback entity + * @param[out] taskId Task ID * - * @return 错误码 + * @return error code */ virtual int CloneFile(const UUID &source, const std::string &user, @@ -255,16 +255,16 @@ class CloneServiceManager { TaskIdType *taskId); /** - * @brief 从文件或快照恢复一个文件 + * @brief Restore a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件名 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source Uuid of file or snapshot + * @param user The user of the file or snapshot + * @param destination destination destination file name + * @param lazyFlag Is in lazy mode + * @param closure asynchronous callback entity + * @param[out] taskId Task ID * - * @return 错误码 + * @return error code */ virtual int RecoverFile(const UUID &source, const std::string &user, @@ -274,36 +274,36 @@ class CloneServiceManager { TaskIdType *taskId); /** - * @brief 安装克隆文件的数据,用于Lazy克隆 + * @brief Install data from clone files for Lazy cloning * - * @param user 用户 - * @param taskId 任务ID + * @param user user + * @param taskId Task ID * - * @return 错误码 + * @return error code */ virtual int Flatten( const std::string &user, const TaskIdType &taskId); /** - * @brief 查询某个用户的克隆/恢复任务信息 + * @brief: Query the clone/restore task information of a certain user * - * @param user 用户名 - * @param info 克隆/恢复任务信息 + * @param user username + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ virtual int GetCloneTaskInfo(const std::string &user, std::vector *info); /** - * @brief 通过Id查询某个用户的克隆/恢复任务信息 + * @brief: Query the clone/restore task information of a certain user through ID * - * @param user 用户名 - * @param taskId 指定的任务Id - * @param info 克隆/恢复任务信息 + * @param user username + * @param taskId Task Id specified + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ 
virtual int GetCloneTaskInfoById( const std::string &user, @@ -311,13 +311,13 @@ class CloneServiceManager { std::vector *info); /** - * @brief 通过文件名查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user through a file name * - * @param user 用户名 - * @param fileName 指定的文件名 - * @param info 克隆/恢复任务信息 + * @param user username + * @param fileName The file name specified + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ virtual int GetCloneTaskInfoByName( const std::string &user, @@ -325,44 +325,44 @@ class CloneServiceManager { std::vector *info); /** - * @brief 通过过滤条件查询某个用户的克隆/恢复任务信息 + * @brief: Query a user's clone/restore task information through filtering criteria * - * @param filter 过滤条件 - * @param info 克隆/恢复任务信息 + * @param filter filtering conditions + * @param info Clone/Restore Task Information * - * @return 错误码 + * @return error code */ virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition &filter, std::vector *info); /** - * @brief 查询src是否有依赖 + * @brief: Check if src has dependencies * - * @param src 指定的文件名 - * @param refStatus 0表示没有依赖,1表示有依赖,2表示需要进一步确认 - * @param needCheckFiles 需要进一步确认的文件列表 + * @param src specified file name + * @param refStatus 0 indicates no dependencies, 1 indicates dependencies, and 2 indicates further confirmation is needed + * @param needCheckFiles List of files that require further confirmation * - * @return 错误码 + * @return error code */ virtual int GetCloneRefStatus(const std::string &src, CloneRefStatus *refStatus, std::vector *needCheckFiles); /** - * @brief 清除失败的clone/Recover任务、状态、文件 + * @brief Clear failed clone/recover tasks, status, files * - * @param user 用户名 - * @param taskId 任务Id + * @param user username + * @param taskId Task Id * - * @return 错误码 + * @return error code */ virtual int CleanCloneTask(const std::string &user, const TaskIdType &taskId); /** - * @brief 重启后恢复未完成clone和recover任务 + * @brief: Restore unfinished clone and recover tasks after restarting * - * @return 错误码 + * @return error code */ virtual int RecoverCloneTask(); @@ -373,80 +373,80 @@ class CloneServiceManager { private: /** - * @brief 从给定的任务列表中获取指定用户的任务集 + * @brief Get the task set of the specified user from the given task list * - * @param cloneInfos 克隆/恢复信息 - * @param user 用户信息 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos Clone/Restore Information + * @param user user information + * @param[out] info Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, const std::string &user, std::vector *info); /** - * @brief 从给定的任务列表中获取符合过滤条件的任务集 + * @brief Retrieve task sets that meet the filtering criteria from the given task list * - * @param cloneInfos 克隆/恢复信息 - * @param filter 过滤条件 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos Clone/Restore Information + * @param filter filtering conditions + * @param[out] info Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, CloneFilterCondition filter, std::vector *info); /** - * @brief 获取已经完成任务信息 + * @brief to obtain completed task information * - * @param taskId 任务ID - * @param taskCloneInfoOut 克隆任务信息 + * @param taskId Task ID + * @param taskCloneInfoOut Clone task information * - * @return 错误码 + * @return error code */ int GetFinishedCloneTask( const TaskIdType &taskId, TaskCloneInfo *taskCloneInfoOut); /** - * @brief 根据克隆任务信息恢复克隆任务 + * @brief Restore clone task based on clone task information * - * @param 
cloneInfo 克隆任务信息 + * @param cloneInfo Clone task information * - * @return 错误码 + * @return error code */ int RecoverCloneTaskInternal(const CloneInfo &cloneInfo); /** - * @brief 根据克隆任务信息恢复清除克隆任务 + * @brief Restore and clear clone tasks based on clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo Clone task information * - * @return 错误码 + * @return error code */ int RecoverCleanTaskInternal(const CloneInfo &cloneInfo); /** - * @brief 构建和push Lazy的任务 + *Task of building and pushing Lazy @brief * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo Clone task information + * @param closure asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverLazyTask( CloneInfo cloneInfo, std::shared_ptr closure); /** - * @brief 构建和push 非Lazy的任务 + * @brief Build and push non Lazy tasks * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo Clone task information + * @param closure asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverNotLazyTask( CloneInfo cloneInfo, diff --git a/src/snapshotcloneserver/clone/clone_task.h b/src/snapshotcloneserver/clone/clone_task.h index 8ea5c6be51..e0c403d475 100644 --- a/src/snapshotcloneserver/clone/clone_task.h +++ b/src/snapshotcloneserver/clone/clone_task.h @@ -145,9 +145,9 @@ struct SnapCloneCommonClosure : public SnapCloneClosure { }; struct CreateCloneChunkContext { - // 数据源 + // Data source std::string location; - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; // seqNum uint64_t sn; @@ -155,15 +155,15 @@ struct CreateCloneChunkContext { uint64_t csn; // chunk size uint64_t chunkSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; - // chunk信息 + // Chunk Information struct CloneChunkInfo *cloneChunkInfo; }; @@ -197,21 +197,21 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { }; struct RecoverChunkContext { - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; - // chunk的分片index + // Chunk's sharding index uint64_t partIndex; - // 总的chunk分片数 + // Total Chunk Fragments uint64_t totalPartNum; - // 分片大小 + // Slice size uint64_t partSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; diff --git a/src/snapshotcloneserver/clone/clone_task_manager.cpp b/src/snapshotcloneserver/clone/clone_task_manager.cpp index be14fc5db6..d12677a3df 100644 --- a/src/snapshotcloneserver/clone/clone_task_manager.cpp +++ b/src/snapshotcloneserver/clone/clone_task_manager.cpp @@ -48,7 +48,7 @@ int CloneTaskManager::Start() { return ret; } isStop_.store(false); - // isStop_标志先置,防止backEndThread先退出 + // isStop_ Flag set first to prevent backEndThread from exiting first backEndThread = std::thread(&CloneTaskManager::BackEndThreadFunc, this); } @@ -111,9 +111,9 @@ int CloneTaskManager::PushTaskInternal(std::shared_ptr task, std::map > *taskMap, Mutex *taskMapMutex, std::shared_ptr taskPool) { - // 同一个clone的Stage1的Task和Stage2的Task的任务ID是一样的, - // clean task的ID也是一样的, - // 触发一次扫描,将已完成的任务Flush出去 + // The task IDs for Stage1 and Stage2 of the same clone are the same, + // The ID of the clean task is also the same, + // 
Trigger a scan to flush out completed tasks ScanStage2Tasks(); ScanStage1Tasks(); ScanCommonTasks(); @@ -177,13 +177,13 @@ void CloneTaskManager::ScanCommonTasks() { for (auto it = commonTaskMap_.begin(); it != commonTaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); - // 移除任务并更新metric + // Remove task and update metric cloneMetric_->UpdateAfterTaskFinish(taskType, status); LOG(INFO) << "common task {" << " TaskInfo : " << *taskInfo @@ -203,7 +203,7 @@ void CloneTaskManager::ScanStage1Tasks() { for (auto it = stage1TaskMap_.begin(); it != stage1TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理已完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); @@ -227,13 +227,13 @@ void CloneTaskManager::ScanStage2Tasks() { for (auto it = stage2TaskMap_.begin(); it != stage2TaskMap_.end();) { auto taskInfo = it->second->GetTaskInfo(); - // 处理完成的任务 + // Process completed tasks if (taskInfo->IsFinish()) { CloneTaskType taskType = taskInfo->GetCloneInfo().GetTaskType(); CloneStatus status = taskInfo->GetCloneInfo().GetStatus(); - // retrying 状态的任务需要重试 + // Tasks in the retrying state need to be retried if (CloneStatus::retrying == status) { if (CloneTaskType::kClone == taskType) { taskInfo->GetCloneInfo(). @@ -244,7 +244,7 @@ void CloneTaskManager::ScanStage2Tasks() { } taskInfo->Reset(); stage2Pool_->PushTask(it->second); - // 其他任务结束更新metric + // Update metric after completing other tasks } else { cloneMetric_->UpdateAfterFlattenTaskFinish(status); LOG(INFO) << "stage2 task {" diff --git a/src/snapshotcloneserver/clone/clone_task_manager.h b/src/snapshotcloneserver/clone/clone_task_manager.h index d9607ccedc..8f7565d561 100644 --- a/src/snapshotcloneserver/clone/clone_task_manager.h +++ b/src/snapshotcloneserver/clone/clone_task_manager.h @@ -78,35 +78,35 @@ class CloneTaskManager { void Stop(); /** - * @brief 往任务管理器中加入任务 + * @brief Add a task to the task manager * - * 用于非Lazy克隆及其他删除克隆等管控面的请求 + * Request for non Lazy clones and other deletion of control surfaces such as clones * - * @param task 任务 + * @param task task * - * @return 错误码 + * @return error code */ int PushCommonTask( std::shared_ptr task); /** - * @brief 往任务管理器中加入LazyClone阶段一的的任务 + * @brief Add LazyClone Phase 1 tasks to the task manager * - * @param task 任务 + * @param task task * - * @return 错误码 + * @return error code */ int PushStage1Task( std::shared_ptr task); /** - * @brief 往任务管理器中加入LazyClone阶段二的的任务 + * @brief: Add LazyClone Phase 2 tasks to the task manager * - * 目前只用于重启恢复时,将Lazy克隆恢复克隆数据阶段的任务加入任务管理器 + * At present, it is only used for adding tasks from the Lazy clone recovery clone data stage to the task manager during restart recovery * - * @param task 任务 + * @param task task * - * @return 错误码 + * @return error code */ int PushStage2Task( std::shared_ptr task); @@ -120,14 +120,14 @@ class CloneTaskManager { void ScanStage2Tasks(); /** - * @brief 往对应线程池和map中push任务 + * @brief pushes tasks to the corresponding thread pool and map * - * @param task 任务 - * @param taskMap 任务表 - * @param taskMapMutex 任务表和线程池的锁 - * @param taskPool 线程池 + * @param task task + * @param taskMap task table + * @param taskMapMutex task table and thread pool locks + * @param taskPool Thread Pool * - * @return 错误码 + * @return error code */ int PushTaskInternal( 
std::shared_ptr task, @@ -136,35 +136,35 @@ class CloneTaskManager { std::shared_ptr taskPool); private: - // 后端线程 + // Backend Thread std::thread backEndThread; - // id->克隆任务表 + // ID ->Clone Task Table std::map > cloneTaskMap_; mutable RWLock cloneTaskMapLock_; - // 存放stage1Pool_池的当前任务,key为destination + // Storing stage1Pool_ The current task of the pool, with key as destination std::map > stage1TaskMap_; mutable Mutex stage1TasksLock_; - // 存放stage1Poo2_池的当前任务,key为destination + // Storage stage1Poo2_ The current task of the pool, with key as destination std::map > stage2TaskMap_; mutable Mutex stage2TasksLock_; - // 存放commonPool_池的当前任务 + // Store commonPool_ Current task of the pool std::map > commonTaskMap_; mutable Mutex commonTasksLock_; - // 用于Lazy克隆元数据部分的线程池 + // Thread pool for Lazy clone metadata section std::shared_ptr stage1Pool_; - // 用于Lazy克隆数据部分的线程池 + // Thread pool for Lazy clone data section std::shared_ptr stage2Pool_; - // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池 + // Thread pool for requests for non Lazy clones and deletion of clones and other control surfaces std::shared_ptr commonPool_; - // 当前任务管理是否停止,用于支持start,stop功能 + //Is the current task management stopped? Used to support start and stop functions std::atomic_bool isStop_; // clone core @@ -173,7 +173,7 @@ class CloneTaskManager { // metric std::shared_ptr cloneMetric_; - // CloneTaskManager 后台线程扫描间隔 + // CloneTaskManager backend thread scan interval uint32_t cloneTaskManagerScanIntervalMs_; }; diff --git a/src/snapshotcloneserver/common/config.h b/src/snapshotcloneserver/common/config.h index d5e93a24c1..f624967fd4 100644 --- a/src/snapshotcloneserver/common/config.h +++ b/src/snapshotcloneserver/common/config.h @@ -41,9 +41,9 @@ struct CurveClientOptions { std::string mdsRootUser; // mds root password std::string mdsRootPassword; - // 调用client方法的重试总时间 + // The total retry time for calling the client method uint64_t clientMethodRetryTimeSec; - // 调用client方法重试间隔时间 + // Call client method retry interval uint64_t clientMethodRetryIntervalMs; }; @@ -51,48 +51,48 @@ struct CurveClientOptions { struct SnapshotCloneServerOptions { // snapshot&clone server address std::string addr; - // 调用client异步方法重试总时间 + // Total retry time for calling client asynchronous methods uint64_t clientAsyncMethodRetryTimeSec; - // 调用client异步方法重试时间间隔 + // Call client asynchronous method retry interval uint64_t clientAsyncMethodRetryIntervalMs; - // 快照工作线程数 + // Number of snapshot worker threads int snapshotPoolThreadNum; - // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) + // Scanning cycle of snapshot background thread scanning waiting queue and work queue (unit: ms) uint32_t snapshotTaskManagerScanIntervalMs; - // 转储chunk分片大小 + // Dump chunk shard size uint64_t chunkSplitSize; - // CheckSnapShotStatus调用间隔 + // CheckSnapShotStatus call interval uint32_t checkSnapshotStatusIntervalMs; - // 最大快照数 + // Maximum Snapshots uint32_t maxSnapshotLimit; // snapshotcore threadpool threadNum uint32_t snapshotCoreThreadNum; // mdsSessionTimeUs uint32_t mdsSessionTimeUs; - // ReadChunkSnapshot同时进行的异步请求数量 + // The number of asynchronous requests simultaneously processed by ReadChunkSnapshot uint32_t readChunkSnapshotConcurrency; - // 用于Lazy克隆元数据部分的线程池线程数 + // Number of thread pool threads used for Lazy clone metadata section int stage1PoolThreadNum; - // 用于Lazy克隆数据部分的线程池线程数 + // Number of thread pool threads used for Lazy clone data section int stage2PoolThreadNum; - // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池线程数 + // Number of thread pool threads used for requests for non Lazy clones and deletion 
of clones and other control surfaces int commonPoolThreadNum; - // CloneTaskManager 后台线程扫描间隔 + // CloneTaskManager backend thread scan interval uint32_t cloneTaskManagerScanIntervalMs; - // clone chunk分片大小 + // Clone chunk shard size uint64_t cloneChunkSplitSize; - // 克隆临时目录 + // Clone temporary directory std::string cloneTempDir; // mds root user std::string mdsRootUser; - // CreateCloneChunk同时进行的异步请求数量 + // Number of asynchronous requests made simultaneously by CreateCloneChunk uint32_t createCloneChunkConcurrency; - // RecoverChunk同时进行的异步请求数量 + // Number of asynchronous requests simultaneously made by RecoverChunk uint32_t recoverChunkConcurrency; - // 引用计数后台扫描每条记录间隔 + // Reference Count Background Scan Each Record Interval uint32_t backEndReferenceRecordScanIntervalMs; - // 引用计数后台扫描每轮间隔 + // Reference Count Background Scan Every Round Interval uint32_t backEndReferenceFuncScanIntervalMs; // dlock options DLockOpts dlockOpts; diff --git a/src/snapshotcloneserver/common/curvefs_client.h b/src/snapshotcloneserver/common/curvefs_client.h index 131f01659c..ad04f6f4b2 100644 --- a/src/snapshotcloneserver/common/curvefs_client.h +++ b/src/snapshotcloneserver/common/curvefs_client.h @@ -95,69 +95,69 @@ class CurveFsClient { virtual ~CurveFsClient() {} /** - * @brief client 初始化 + * @brief client initialization * - * @return 错误码 + * @return error code */ virtual int Init(const CurveClientOptions &options) = 0; /** - * @brief client 资源回收 + * @brief client resource recycling * - * @return 错误码 + * @return error code */ virtual int UnInit() = 0; /** - * @brief 创建快照 + * @brief Create a snapshot * - * @param filename 文件名 - * @param user 用户信息 - * @param[out] seq 快照版本号 + * @param filename File name + * @param user user information + * @param[out] seq snapshot version number * - * @return 错误码 + * @return error code */ virtual int CreateSnapshot(const std::string &filename, const std::string &user, uint64_t *seq) = 0; /** - * @brief 删除快照 + * @brief Delete snapshot * - * @param filename 文件名 - * @param user 用户信息 - * @param seq 快照版本号 + * @param filename File name + * @param user user information + * @param seq snapshot version number * - * @return 错误码 + * @return error code */ virtual int DeleteSnapshot(const std::string &filename, const std::string &user, uint64_t seq) = 0; /** - * @brief 获取快照文件信息 + * @brief Get snapshot file information * - * @param filename 文件名 - * @param user 用户名 - * @param seq 快照版本号 - * @param[out] snapInfo 快照文件信息 + * @param filename File name + * @param user username + * @param seq snapshot version number + * @param[out] snapInfo snapshot file information * - * @return 错误码 + * @return error code */ virtual int GetSnapshot(const std::string &filename, const std::string &user, uint64_t seq, FInfo* snapInfo) = 0; /** - * @brief 查询快照文件segment信息 + * @brief Query snapshot file segment information * - * @param filename 文件名 - * @param user 用户信息 - * @param seq 快照版本号 - * @param offset 偏移值 - * @param segInfo segment信息 + * @param filename File name + * @param user user information + * @param seq snapshot version number + * @param offset offset value + * @param segInfo segment information * - * @return 错误码 + * @return error code */ virtual int GetSnapshotSegmentInfo(const std::string &filename, const std::string &user, @@ -166,16 +166,16 @@ class CurveFsClient { SegmentInfo *segInfo) = 0; /** - * @brief 读取snapshot chunk的数据 + * @brief Read snapshot chunk data * - * @param cidinfo chunk ID 信息 - * @param seq 快照版本号 - * @param offset 偏移值 - * @param len 长度 - * @param[out] buf buffer指针 - * @param: 
scc是异步回调 + * @param cidinfo chunk ID information + * @param seq snapshot version number + * @param offset offset value + * @param len length + * @param[out] buf buffer pointer + * @param: scc is an asynchronous callback * - * @return 错误码 + * @return error code */ virtual int ReadChunkSnapshot(ChunkIDInfo cidinfo, uint64_t seq, @@ -185,11 +185,11 @@ class CurveFsClient { SnapCloneClosure* scc) = 0; /** - * 获取快照状态 - * @param: userinfo是用户信息 - * @param: filenam文件名 - * @param: seq是文件版本号信息 - * @param: filestatus 快照文件状态 + *Get snapshot status + * @param: userinfo is the user information + * @param: filenam file name + * @param: seq is the file version number information + * @param: filestatus Snapshot file status */ virtual int CheckSnapShotStatus(std::string filename, std::string user, @@ -197,33 +197,33 @@ class CurveFsClient { FileStatus* filestatus) = 0; /** - * @brief 获取chunk的版本号信息 + * @brief to obtain the version number information of the chunk * - * @param cidinfo chunk ID 信息 - * @param chunkInfo chunk详细信息 + * @param cidinfo chunk ID information + * @param chunkInfo chunk Details * - * @return 错误码 + * @return error code */ virtual int GetChunkInfo(const ChunkIDInfo &cidinfo, ChunkInfoDetail *chunkInfo) = 0; /** - * @brief 创建clone文件 + * @brief Create clone file * @detail - * - 若是clone,sn重置为初始值 - * - 若是recover,sn不变 - * - * @param source clone源文件名 - * @param filename clone目标文件名 - * @param user 用户信息 - * @param size 文件大小 - * @param sn 版本号 - * @param chunkSize chunk大小 + * - If clone, reset sn to initial value + * - If recover, sn remains unchanged + * + * @param source clone Source file name + * @param filename clone Target filename + * @param user user information + * @param size File size + * @param sn version number + * @param chunkSize chunk size * @param stripeUnit stripe size * @param stripeCount stripe count - * @param[out] fileInfo 文件信息 + * @param[out] fileInfo file information * - * @return 错误码 + * @return error code */ virtual int CreateCloneFile( const std::string &source, @@ -238,20 +238,20 @@ class CurveFsClient { FInfo* fileInfo) = 0; /** - * @brief lazy 创建clone chunk + * @brief lazy creation of a clone chunk * @detail - * - location的格式定义为 A@B的形式。 - * - 如果源数据在s3上,则location格式为uri@s3,uri为实际chunk对象的地址; - * - 如果源数据在curvefs上,则location格式为/filename/chunkindex@cs - * - * @param location 数据源的url - * @param chunkidinfo 目标chunk - * @param sn chunk的序列号 - * @param csn correct sn - * @param chunkSize chunk的大小 - * @param: scc是异步回调 - * - * @return 错误码 + * - The location format is defined as A@B. + * - If the source data is on S3, the location format is uri@s3, where uri is the actual chunk object's address. + * - If the source data is on CurveFS, the location format is /filename/chunkindex@cs. 
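For reference, with the format described above, the location strings passed to CreateCloneChunk would look like the following; the file and object names here are made-up examples, not values taken from the code base:

    // Source data on S3: "<uri>@s3", where <uri> is the name of the chunk object
    std::string s3Location = "snapshot-chunk-0001@s3";
    // Source data on CurveFS: "/<filename>/<chunkindex>@cs"
    std::string curveLocation = "/volume1/42@cs";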
+ * + * @param location URL of the data source + * @param chunkidinfo Target chunk + * @param sn chunk's sequence number + * @param csn correct sequence number + * @param chunkSize Size of the chunk + * @param scc Asynchronous callback + * + * @return Error code */ virtual int CreateCloneChunk( const std::string &location, @@ -263,14 +263,14 @@ class CurveFsClient { /** - * @brief 实际恢复chunk数据 + * @brief Actual recovery chunk data * * @param chunkidinfo chunkidinfo - * @param offset 偏移 - * @param len 长度 - * @param: scc是异步回调 + * @param offset offset + * @param len length + * @param: scc is an asynchronous callback * - * @return 错误码 + * @return error code */ virtual int RecoverChunk( const ChunkIDInfo &chunkidinfo, @@ -279,37 +279,37 @@ class CurveFsClient { SnapCloneClosure* scc) = 0; /** - * @brief 通知mds完成Clone Meta + * @brief Notify mds to complete Clone Meta * - * @param filename 目标文件名 - * @param user 用户名 + * @param filename Target file name + * @param user username * - * @return 错误码 + * @return error code */ virtual int CompleteCloneMeta( const std::string &filename, const std::string &user) = 0; /** - * @brief 通知mds完成Clone Chunk + * @brief Notify mds to complete Clone Chunk * - * @param filename 目标文件名 - * @param user 用户名 + * @param filename Target file name + * @param user username * - * @return 错误码 + * @return error code */ virtual int CompleteCloneFile( const std::string &filename, const std::string &user) = 0; /** - * @brief 设置clone文件状态 + * @brief Set clone file status * - * @param filename 文件名 - * @param filestatus 要设置的目标状态 - * @param user 用户名 + * @param filename File name + * @param filestatus The target state to be set + * @param user username * - * @return 错误码 + * @return error code */ virtual int SetCloneFileStatus( const std::string &filename, @@ -317,13 +317,13 @@ class CurveFsClient { const std::string &user) = 0; /** - * @brief 获取文件信息 + * @brief Get file information * - * @param filename 文件名 - * @param user 用户名 - * @param[out] fileInfo 文件信息 + * @param filename File name + * @param user username + * @param[out] fileInfo file information * - * @return 错误码 + * @return error code */ virtual int GetFileInfo( const std::string &filename, @@ -331,15 +331,15 @@ class CurveFsClient { FInfo* fileInfo) = 0; /** - * @brief 查询或分配文件segment信息 + * @brief Query or allocate file segment information * - * @param allocate 是否分配 - * @param offset 偏移值 - * @param fileInfo 文件信息 - * @param user 用户名 - * @param segInfo segment信息 + * @param allocate whether to allocate + * @param offset offset value + * @param fileInfo file information + * @param user username + * @param segInfo segment information * - * @return 错误码 + * @return error code */ virtual int GetOrAllocateSegmentInfo( bool allocate, @@ -349,15 +349,15 @@ class CurveFsClient { SegmentInfo *segInfo) = 0; /** - * @brief 为recover rename复制的文件 + * @brief is the file copied for recover rename * - * @param user 用户信息 - * @param originId 被恢复的原始文件Id - * @param destinationId 克隆出的目标文件Id - * @param origin 被恢复的原始文件名 - * @param destination 克隆出的目标文件 + * @param user user information + * @param originId The original file ID that was restored + * @param destinationId The cloned target file ID + * @param origin The original file name of the recovered file + * @param destination Cloned destination file * - * @return 错误码 + * @return error code */ virtual int RenameCloneFile( const std::string &user, @@ -368,13 +368,13 @@ class CurveFsClient { /** - * @brief 删除文件 + * @brief Delete file * - * @param fileName 文件名 - * @param user 用户名 - * @param fileId 删除文件的inodeId + * 
@param fileName File name + * @param user username + * @param fileId Delete the inodeId of the file * - * @return 错误码 + * @return error code */ virtual int DeleteFile( const std::string &fileName, @@ -382,23 +382,23 @@ class CurveFsClient { uint64_t fileId) = 0; /** - * @brief 创建目录 + * @brief Create directory * - * @param dirpath 目录名 - * @param user 用户名 + * @param dirpath directory name + * @param user username * - * @return 错误码 + * @return error code */ virtual int Mkdir(const std::string& dirpath, const std::string &user) = 0; /** - * @brief 变更文件的owner + * @brief Change the owner of the file * - * @param filename 文件名 - * @param newOwner 新的owner + * @param filename File name + * @param newOwner New owner * - * @return 错误码 + * @return error code */ virtual int ChangeOwner(const std::string& filename, const std::string& newOwner) = 0; @@ -411,7 +411,7 @@ class CurveFsClientImpl : public CurveFsClient { snapClient_(snapClient), fileClient_(fileClient) {} virtual ~CurveFsClientImpl() {} - // 以下接口定义见CurveFsClient接口注释 + // The following interface definitions can be found in the CurveFsClient interface annotations int Init(const CurveClientOptions &options) override; int UnInit() override; diff --git a/src/snapshotcloneserver/common/snapshotclone_info.h b/src/snapshotcloneserver/common/snapshotclone_info.h index 766ae00e05..6a10346dc4 100644 --- a/src/snapshotcloneserver/common/snapshotclone_info.h +++ b/src/snapshotcloneserver/common/snapshotclone_info.h @@ -61,7 +61,7 @@ enum class CloneStep { kEnd }; -// 数据库中clone/recover任务信息 +// Clone/recover task information in the database class CloneInfo { public: CloneInfo() @@ -231,37 +231,37 @@ class CloneInfo { bool ParseFromString(const std::string &value); private: - // 任务Id + // Task Id TaskIdType taskId_; - // 用户 + // Users std::string user_; - // 克隆或恢复 + // Clone or Restore CloneTaskType type_; - // 源文件或快照uuid + // Source file or snapshot uuid std::string source_; - // 目标文件名 + // Destination File Name std::string destination_; - // 目标文件所在的poolset + // The poolset where the target file is located std::string poolset_; - // 被恢复的原始文件id, 仅用于恢复 + // The original file ID that has been restored, for recovery purposes only uint64_t originId_; - // 目标文件id + // Target file id uint64_t destinationId_; - // 创建时间 + // Creation time uint64_t time_; - // 克隆/恢复的文件类型 + // Clone/Restore File Types CloneFileType fileType_; - // 是否lazy + // Lazy or not bool isLazy_; - // 克隆进度, 下一个步骤 + // Clone progress, next step CloneStep nextStep_; - // 处理的状态 + // Processing status CloneStatus status_; }; std::ostream& operator<<(std::ostream& os, const CloneInfo &cloneInfo); -// 快照处理状态 +// Snapshot processing status enum class Status{ done = 0, pending, @@ -271,7 +271,7 @@ enum class Status{ error }; -// 快照信息 +// Snapshot Information class SnapshotInfo { public: SnapshotInfo() @@ -437,21 +437,21 @@ class SnapshotInfo { bool ParseFromString(const std::string &value); private: - // 快照uuid + // Snapshot uuid UUID uuid_; - // 租户信息 + // Tenant Information std::string user_; - // 快照目标文件名 + // Snapshot Destination File Name std::string fileName_; - // 快照名 + // Snapshot Name std::string snapshotName_; - // 快照版本号 + // Snapshot version number uint64_t seqNum_; - // 文件的chunk大小 + // Chunk size of the file uint32_t chunkSize_; - // 文件的segment大小 + // The segment size of the file uint64_t segmentSize_; - // 文件大小 + // File size uint64_t fileLength_; // stripe size uint64_t stripeUnit_; @@ -459,9 +459,9 @@ class SnapshotInfo { uint64_t stripeCount_; // poolset std::string poolset_; - // 快照创建时间 + 
// Snapshot creation time uint64_t time_; - // 快照处理的状态 + // Status of snapshot processing Status status_; }; diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store.h b/src/snapshotcloneserver/common/snapshotclone_meta_store.h index ff550f5fc7..ac6f3f585b 100644 --- a/src/snapshotcloneserver/common/snapshotclone_meta_store.h +++ b/src/snapshotcloneserver/common/snapshotclone_meta_store.h @@ -43,23 +43,23 @@ class SnapshotCloneMetaStore { public: SnapshotCloneMetaStore() {} virtual ~SnapshotCloneMetaStore() {} - // 添加一条快照信息记录 + // Add a snapshot information record /** - * 添加一条快照记录到metastore中 - * @param 快照信息结构体 - * @return: 0 插入成功/ -1 插入失败 + * Add a snapshot record to metastore + * @param snapshot information structure + * @return: 0 insertion successful/-1 insertion failed */ virtual int AddSnapshot(const SnapshotInfo &snapinfo) = 0; /** - * 从metastore删除一条快照记录 - * @param 快照任务的uuid,全局唯一 - * @return 0 删除成功/ -1 删除失败 + * Delete a snapshot record from metastore + * @param The uuid of the snapshot task, globally unique + * @return 0 successfully deleted/-1 failed to delete */ virtual int DeleteSnapshot(const UUID &uuid) = 0; /** - * 更新快照记录 - * @param 快照信息结构体 - * @return: 0 更新成功/ -1 更新失败 + * Update snapshot records + * @param snapshot information structure + * @return: 0 successfully updated/-1 failed to update */ virtual int UpdateSnapshot(const SnapshotInfo &snapinfo) = 0; @@ -75,74 +75,74 @@ class SnapshotCloneMetaStore { virtual int CASSnapshot(const UUID& uuid, CASFunc cas) = 0; /** - * 获取指定快照的快照信息 - * @param 快照的uuid - * @param 保存快照信息的指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain snapshot information for the specified snapshot + * @param uuid of snapshot + * @param pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ virtual int GetSnapshotInfo(const UUID &uuid, SnapshotInfo *info) = 0; /** - * 获取指定文件的快照信息列表 - * @param 文件名 - * @param 保存快照信息的vector指针 - * @return 0 获取成功/ -1 获取失败 + * Obtain a list of snapshot information for the specified file + * @param file name + * @param vector pointer to save snapshot information + * @return 0 successfully obtained/-1 failed to obtain */ virtual int GetSnapshotList(const std::string &filename, std::vector *v) = 0; /** - * 获取全部的快照信息列表 - * @param 保存快照信息的vector指针 - * @return: 0 获取成功/ -1 获取失败 + * Obtain a list of all snapshot information + * @param vector pointer to save snapshot information + * @return: 0 successfully obtained/-1 failed to obtain */ virtual int GetSnapshotList(std::vector *list) = 0; /** - * @brief 获取快照总数 + * @brief Total number of snapshots taken * - * @return 快照总数 + * @return Total number of snapshots */ virtual uint32_t GetSnapshotCount() = 0; /** - * @brief 插入一条clone任务记录到metastore - * @param clone记录信息 - * @return: 0 插入成功/ -1 插入失败 + * @brief Insert a clone task record into metastore + * @param clone records information + * @return: 0 insertion successful/-1 insertion failed */ virtual int AddCloneInfo(const CloneInfo &cloneInfo) = 0; /** - * @brief 从metastore删除一条clone任务记录 - * @param clone任务的任务id - * @return: 0 删除成功/ -1 删除失败 + * @brief Delete a clone task record from metastore + * @param Task ID of clone task + * @return: 0 successfully deleted/-1 failed to delete */ virtual int DeleteCloneInfo(const std::string &taskID) = 0; /** - * @brief 更新一条clone任务记录 - * @param clone记录信息 - * @return: 0 更新成功/ -1 更新失败 + * @brief Update a clone task record + * @param clone records information + * @return: 0 successfully updated/-1 failed to update */ virtual int UpdateCloneInfo(const CloneInfo &cloneInfo) 
= 0; /** - * @brief 获取指定task id的clone任务信息 - * @param clone任务id - * @param[out] clone记录信息的指针 - * @return: 0 获取成功/ -1 获取失败 + * @brief Get clone task information for the specified task ID + * @param clone Task ID + * @param[out] pointer to clone record information + * @return: 0 successfully obtained/-1 failed to obtain */ virtual int GetCloneInfo(const std::string &taskID, CloneInfo *info) = 0; /** - * @brief 获取指定文件的clone任务信息 + * @brief Get clone task information for the specified file * - * @param fileName 文件名 - * @param[out] clone记录信息的指针 - * @return: 0 获取成功/ -1 获取失败 + * @param fileName File name + * @param[out] pointer to clone record information + * @return: 0 successfully obtained/-1 failed to obtain */ virtual int GetCloneInfoByFileName( const std::string &fileName, std::vector *list) = 0; /** - * @brief 获取所有clone任务的信息列表 - * @param[out] 只想clone任务vector指针 - * @return: 0 获取成功/ -1 获取失败 + * @brief Get a list of information for all clone tasks + * @param[out] just wants to clone the task vector pointer + * @return: 0 successfully obtained/-1 failed to obtain */ virtual int GetCloneInfoList(std::vector *list) = 0; }; diff --git a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h index 6bc69aca1e..ea44cdc2dd 100644 --- a/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h +++ b/src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h @@ -83,16 +83,16 @@ class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { private: /** - * @brief 加载快照信息 + * @brief Load snapshot information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadSnapshotInfos(); /** - * @brief 加载克隆信息 + * @brief Load clone information * - * @return 0 加载成功/ -1 加载失败 + * @return 0 successfully loaded/ -1 failed to load */ int LoadCloneInfos(); @@ -100,11 +100,11 @@ class SnapshotCloneMetaStoreEtcd : public SnapshotCloneMetaStore { std::shared_ptr client_; std::shared_ptr codec_; - // key is UUID, map 需要考虑并发保护 + // Key is UUID, map needs to consider concurrency protection std::map snapInfos_; // snap info lock RWLock snapInfos_mutex; - // key is TaskIdType, map 需要考虑并发保护 + // Key is TaskIdType, map needs to consider concurrency protection std::map cloneInfos_; // clone info map lock RWLock cloneInfos_lock_; diff --git a/src/snapshotcloneserver/common/snapshotclone_metric.h b/src/snapshotcloneserver/common/snapshotclone_metric.h index 410d9b19f9..2c6fbc4bd5 100644 --- a/src/snapshotcloneserver/common/snapshotclone_metric.h +++ b/src/snapshotcloneserver/common/snapshotclone_metric.h @@ -53,17 +53,17 @@ struct SnapshotMetric { const std::string SnapshotMetricPrefix = "snapshotcloneserver_snapshot_metric_"; - // 正在进行的快照数量 + // Number of snapshots in progress bvar::Adder snapshotDoing; - // 正在等待的快照数量 + // Number of waiting snapshots bvar::Adder snapshotWaiting; - // 累计成功的快照数量 + // Accumulated number of successful snapshots bvar::Adder snapshotSucceed; - // 累计失败的快照数量 + // Accumulated number of failed snapshots bvar::Adder snapshotFailed; std::shared_ptr metaStore_; - // 系统内快照总量 + // Total number of snapshots within the system bvar::PassiveStatus snapshotNum; explicit SnapshotMetric(std::shared_ptr metaStore) : @@ -92,25 +92,25 @@ struct CloneMetric { const std::string CloneMetricPrefix = "snapshotcloneserver_clone_metric_"; - // 正在执行的克隆任务数量 + // Number of cloning tasks being executed bvar::Adder cloneDoing; - // 累计成功的克隆任务数量 + // Accumulated number of successful cloning tasks bvar::Adder 
cloneSucceed; - // 累计失败的克隆任务数量 + // Accumulated number of failed clone tasks bvar::Adder cloneFailed; - // 正在执行的恢复任务数量 + // Number of recovery tasks being executed bvar::Adder recoverDoing; - // 累计成功的恢复任务数量 + // Accumulated number of successful recovery tasks bvar::Adder recoverSucceed; - // 累计失败的恢复任务数量 + // Accumulated number of failed recovery tasks bvar::Adder recoverFailed; - // 正在执行的Flatten任务数量 + // Number of Flatten tasks being executed bvar::Adder flattenDoing; - // 累计成功的Flatten任务数量 + // Accumulated number of successful Flatten tasks bvar::Adder flattenSucceed; - // 累计失败的Flatten任务数量 + // Accumulated number of failed Flatten tasks bvar::Adder flattenFailed; CloneMetric() : diff --git a/src/snapshotcloneserver/common/task.h b/src/snapshotcloneserver/common/task.h index bc0faa4178..0268fa2446 100644 --- a/src/snapshotcloneserver/common/task.h +++ b/src/snapshotcloneserver/common/task.h @@ -44,9 +44,9 @@ class Task { Task& operator=(Task&&) = default; /** - * @brief 获取快照任务执行体闭包 + * @brief Get the closure that executes the snapshot task * - * @return 快照任务执行体 + * @return The snapshot task execution closure */ virtual std::function clousre() { return [this] () { @@ -55,21 +55,21 @@ } /** - * @brief 获取快照任务id + * @brief Get snapshot task ID * - * @return 快照任务id + * @return Snapshot task ID */ TaskIdType GetTaskId() const { return taskId_; } /** - * @brief 快照执行函数接口 + * @brief Snapshot execution function interface */ virtual void Run() = 0; private: - // 快照id + // Snapshot task ID TaskIdType taskId_; }; diff --git a/src/snapshotcloneserver/common/task_info.h b/src/snapshotcloneserver/common/task_info.h index cc72201d52..39f7440921 100644 --- a/src/snapshotcloneserver/common/task_info.h +++ b/src/snapshotcloneserver/common/task_info.h @@ -48,59 +48,59 @@ class TaskInfo { TaskInfo& operator=(TaskInfo&&) = default; /** - * @brief 设置任务完成度百分比 + * @brief Set task completion percentage * - * @param persent 任务完成度百分比 + * @param persent Task completion percentage */ void SetProgress(uint32_t persent) { progress_ = persent; } /** - * @brief 获取任务完成度百分比 + * @brief Get task completion percentage * - * @return 任务完成度百分比 + * @return Task completion percentage */ uint32_t GetProgress() const { return progress_; } /** - * @brief 完成任务 + * @brief Complete the task */ void Finish() { isFinish_.store(true); } /** - * @brief 获取任务是否完成 + * @brief Get whether the task is finished * - * @retval true 任务完成 - * @retval false 任务未完成 + * @retval true The task is finished + * @retval false The task is not finished */ bool IsFinish() const { return isFinish_.load(); } /** - * @brief 取消任务 + * @brief Cancel the task */ void Cancel() { isCanceled_ = true; } /** - * @brief 获取任务是否取消 + * @brief Get whether the task has been canceled * - * @retval true 任务已取消 - * @retval false 任务未取消 + * @retval true The task has been canceled + * @retval false The task has not been canceled */ bool IsCanceled() const { return isCanceled_; } /** - * @brief 重置任务 + * @brief Reset the task */ void Reset() { isFinish_.store(false); @@ -108,26 +108,26 @@ class TaskInfo { } /** - * @brief 获取任务锁的引用,以便使用LockGuard加锁解锁 + * @brief Get a reference to the task lock so that it can be locked and unlocked with LockGuard * - * 用于同步任务完成和取消功能 - * 1. 任务完成前,先锁定任务,然后判断任务是否取消, - * 若已取消,则释放锁, - * 否则执行任务完成逻辑之后释放锁。 - * 2. 任务取消前,先锁定任务,然后判断任务是否完成, - * 若已完成,则释放锁, - * 否则执行任务取消逻辑之后释放锁。 + * Used to synchronize the task completion and cancellation paths: + * 1.
Before completing the task, first lock the task and then determine whether the task is cancelled, + * If cancelled, release the lock, + * Otherwise, release the lock after completing the logic of the task. + * 2. Before canceling a task, first lock the task and then determine whether the task is completed, + * If completed, release the lock, + * Otherwise, execute the task to cancel the logic and release the lock. */ curve::common::Mutex& GetLockRef() { return lock_; } private: - // 任务完成度百分比 + // Task completion percentage uint32_t progress_; - // 任务任务是否结束 + // Is the task completed std::atomic_bool isFinish_; - // 任务是否被取消 + // Has the task been canceled bool isCanceled_; mutable curve::common::Mutex lock_; }; diff --git a/src/snapshotcloneserver/common/thread_pool.h b/src/snapshotcloneserver/common/thread_pool.h index 1f7b4ea697..39974bc766 100644 --- a/src/snapshotcloneserver/common/thread_pool.h +++ b/src/snapshotcloneserver/common/thread_pool.h @@ -31,40 +31,40 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 快照线程池 + * @brief snapshot thread pool */ class ThreadPool { public: /** - * @brief 构造函数 + * @brief constructor * - * @param threadNum 最大线程数 + * @param threadNum maximum number of threads */ explicit ThreadPool(int threadNum) : threadNum_(threadNum) {} /** - * @brief 启动线程池 + * @brief Start Thread Pool */ int Start(); /** - * @brief 停止线程池 + * @brief Stop thread pool */ void Stop(); /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task snapshot task */ void PushTask(std::shared_ptr task) { threadPool_.Enqueue(task->clousre()); } /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task snapshot task */ void PushTask(Task* task) { threadPool_.Enqueue(task->clousre()); @@ -72,11 +72,11 @@ class ThreadPool { private: /** - * @brief 通用线程池 + * @brief Universal Thread Pool */ curve::common::TaskThreadPool<> threadPool_; /** - * @brief 线程数 + * @brief Number of threads */ int threadNum_; }; diff --git a/src/snapshotcloneserver/main.cpp b/src/snapshotcloneserver/main.cpp index b44468b857..24057d7700 100644 --- a/src/snapshotcloneserver/main.cpp +++ b/src/snapshotcloneserver/main.cpp @@ -30,12 +30,12 @@ using Configuration = ::curve::common::Configuration; using SnapShotCloneServer = ::curve::snapshotcloneserver::SnapShotCloneServer; void LoadConfigFromCmdline(Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 + // If there are settings on the command line, the command line overwrites the fields in the configuration file google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("addr", &info) && !info.is_default) { conf->SetStringValue("server.address", FLAGS_addr); } - // 设置日志存放文件夹 + // Set log storage folder if (FLAGS_log_dir.empty()) { if (!conf->GetStringValue("log.dir", &FLAGS_log_dir)) { LOG(WARNING) << "no log.dir in " << FLAGS_conf diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.cpp b/src/snapshotcloneserver/snapshot/snapshot_core.cpp index 6abb94b5e9..995c1fe0bb 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_core.cpp @@ -64,7 +64,7 @@ int SnapshotCoreImpl::CreateSnapshotPre(const std::string &file, << ", user = " << user << ", snapshotName = " << snapshotName << ", Exist SnapInfo : " << snap; - // 视为同一个快照,返回任务已存在 + // Treat as the same snapshot, return task already exists *snapInfo = snap; return kErrCodeTaskExist; } @@ -136,39 +136,39 @@ constexpr uint32_t kProgressTransferSnapshotDataComplete = 99; constexpr uint32_t 
kProgressComplete = 100; /** - * @brief 异步执行创建快照任务并更新任务进度 + * @brief Asynchronous execution of snapshot creation task and update of task progress * - * 快照进度规划如下: + * The snapshot schedule is planned as follows: * * |CreateSnapshotOnCurvefs| BuildChunkIndexData | BuildSnapshotMap | TransferSnapshotData | UpdateSnapshot | //NOLINT * | 5% | 6% | 10% | 10%~99% | 100% | //NOLINT * * - * 异步执行期间发生error与cancel情况说明: - * 1. 发生error将导致整个异步任务直接中断,并且不做任何清理动作: - * 发生error时,一般系统存在异常,清理动作很可能不能完成, - * 因此,不进行任何清理,只置状态,待人工干预排除异常之后, - * 使用DeleteSnapshot功能去手动删除error状态的快照。 - * 2. 发生cancel时则以创建功能相反的顺序依次进行清理动作, - * 若清理过程发生error,则立即中断,之后同error过程。 + * Explanation of errors and cancellations during asynchronous execution: + * 1. The occurrence of an error will cause the entire asynchronous task to be directly interrupted without any cleaning action: + * When an error occurs, there is usually an abnormality in the system, and the cleaning action may not be completed, + * Therefore, no cleaning will be carried out, only the status will be set, and after manual intervention to eliminate anomalies, + * Use the DeleteSnapshot function to manually delete snapshots with error status. + * 2. When a cancel occurs, the cleaning actions are carried out in reverse order of creating functions, + * If an error occurs during the cleaning process, it will be immediately interrupted, followed by the same error process. * - * @param task 快照任务 + * @param task snapshot task */ void SnapshotCoreImpl::HandleCreateSnapshotTask( std::shared_ptr task) { std::string fileName = task->GetFileName(); - // 如果当前有失败的快照,需先清理失败的快照,否则快照会再次失败 + // If there are currently failed snapshots, it is necessary to clean up the failed snapshots first, otherwise the snapshots will fail again int ret = ClearErrorSnapBeforeCreateSnapshot(task); if (ret < 0) { HandleCreateSnapshotError(task); return; } - // 为支持任务重启,这里有三种情况需要处理 - // 1. 没打过快照, 没有seqNum且curve上没有快照 - // 2. 打过快照, 有seqNum且curve上有快照 - // 3. 打过快照并已经转储完删除快照, 有seqNum但curve上没有快照 + // To support task restart, there are three situations that need to be addressed + // 1. I haven't taken a snapshot, there's no seqNum, and there's no snapshot on the curve + // 2. I have taken a snapshot, and there is seqNum and a snapshot on the curve + // 3. I have taken a snapshot and have completed the dump to delete it. 
There is a seqNum, but no snapshot exists on Curve SnapshotInfo *info = &(task->GetSnapshotInfo()); UUID uuid = task->GetUuid(); @@ -347,9 +347,9 @@ int SnapshotCoreImpl::ClearErrorSnapBeforeCreateSnapshot( std::make_shared(snap, snapInfoMetric); taskInfo->GetSnapshotInfo().SetStatus(Status::errorDeleting); taskInfo->UpdateMetric(); - // 处理删除快照 + // Handle deleting the snapshot HandleDeleteSnapshotTask(taskInfo); - // 仍然失败,则本次快照失败 + // If it still fails, this snapshot attempt fails if (taskInfo->GetSnapshotInfo().GetStatus() != Status::done) { LOG(ERROR) << "Find error Snapshot and Delete Fail" << ", error snapshot Id = " << snap.GetUuid() @@ -579,7 +579,7 @@ int SnapshotCoreImpl::CreateSnapshotOnCurvefs( return ret; } - // 打完快照需等待2个session时间,以保证seq同步到所有client + // After taking the snapshot, wait for 2 session periods to ensure the seq has been synchronized to all clients std::this_thread::sleep_for( std::chrono::microseconds(mdsSessionTimeUs_ * 2)); @@ -684,12 +684,12 @@ int SnapshotCoreImpl::BuildChunkIndexData( << ", uuid = " << task->GetUuid(); return kErrCodeInternalError; } - // 2个sn,小的是snap sn,大的是快照之后的写 - // 1个sn,有两种情况: - // 小于等于seqNum时为snap sn, 且快照之后未写过; - // 大于时, 表示打快照时为空,是快照之后首次写的版本(seqNum+1) - // 没有sn,从未写过 - // 大于2个sn,错误,报错 + // Two sns: the smaller one is the snapshot sn, the larger one is a write made after the snapshot + // One sn, two possible cases: + // if it is less than or equal to seqNum, it is the snapshot sn and the chunk has not been written since the snapshot; + // if it is greater, the chunk was empty when the snapshot was taken, and this is the first version written after the snapshot (seqNum+1) + // No sn: the chunk has never been written + // More than two sns: error, report it if (chunkInfo.chunkSn.size() == 2) { uint64_t seq = std::min(chunkInfo.chunkSn[0], @@ -878,7 +878,7 @@ int SnapshotCoreImpl::TransferSnapshotData( return kErrCodeSuccess; } } - // 最后剩余数量不足的任务 + // Handle the remaining tasks at the end that do not make up a full batch tracker->Wait(); ret = tracker->GetResult(); if (ret < 0) { @@ -900,7 +900,7 @@ int SnapshotCoreImpl::DeleteSnapshotPre( NameLockGuard lockSnapGuard(snapshotRef_->GetSnapshotLock(), uuid); int ret = metaStore_->GetSnapshotInfo(uuid, snapInfo); if (ret < 0) { - // 快照不存在时直接返回删除成功,使接口幂等 + // If the snapshot does not exist, return success directly to keep the interface idempotent return kErrCodeSuccess; } if (snapInfo->GetUser() != user) { @@ -952,14 +952,14 @@ constexpr uint32_t kDelProgressDeleteChunkDataComplete = 80; constexpr uint32_t kDelProgressDeleteChunkIndexDataComplete = 90; /** - * @brief 异步执行删除快照任务并更新任务进度 + * @brief Asynchronously execute the delete snapshot task and update the task progress * - * 删除快照进度规划如下: + * The delete snapshot progress is planned as follows: * * |BuildSnapshotMap|DeleteChunkData|DeleteChunkIndexData|DeleteSnapshot| * | 10% | 10%~80% | 90% | 100% | * - * @param task 快照任务 + * @param task snapshot task */ void SnapshotCoreImpl::HandleDeleteSnapshotTask( std::shared_ptr task) { @@ -1129,8 +1129,8 @@ int SnapshotCoreImpl::BuildSnapshotMap(const std::string &fileName, << " ret = " << ret << ", fileName = " << snap.GetFileName() << ", seqNum = " << snap.GetSeqNum(); - // 此处不能返回错误, - // 否则一旦某个失败的快照没有indexdata,所有快照都无法删除 + // Do not return an error here, + // otherwise, once a failed snapshot has no indexdata, none of the snapshots could be deleted } else { fileSnapshotMap->maps.push_back(std::move(indexData)); } diff --git a/src/snapshotcloneserver/snapshot/snapshot_core.h
b/src/snapshotcloneserver/snapshot/snapshot_core.h index 747e02ea2f..a6446800ec 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_core.h +++ b/src/snapshotcloneserver/snapshot/snapshot_core.h @@ -45,18 +45,18 @@ namespace snapshotcloneserver { class SnapshotTaskInfo; /** - * @brief 文件的快照索引块映射表 + * @brief Snapshot index block mapping table for file */ struct FileSnapMap { std::vector maps; /** - * @brief 获取当前映射表中是否存在当前chunk数据 + * @brief to obtain whether the current chunk data exists in the current mapping table * - * @param name chunk数据对象 + * @param name chunk data object * - * @retval true 存在 - * @retval false 不存在 + * @retval true exists + * @retval false does not exist */ bool IsExistChunk(const ChunkDataName &name) const { bool find = false; @@ -71,7 +71,7 @@ struct FileSnapMap { }; /** - * @brief 快照核心模块 + * @brief snapshot core module */ class SnapshotCore { public: @@ -79,14 +79,14 @@ class SnapshotCore { virtual ~SnapshotCore() {} /** - * @brief 创建快照前置操作 + * @brief Create snapshot pre operation * - * @param file 文件名 - * @param user 用户名 - * @param snapshotName 快照名 - * @param[out] snapInfo 快照信息 + * @param file file name + * @param user username + * @param snapshotName SnapshotName + * @param[out] snapInfo snapshot information * - * @return 错误码 + * @return error code */ virtual int CreateSnapshotPre(const std::string &file, const std::string &user, @@ -94,27 +94,27 @@ class SnapshotCore { SnapshotInfo *snapInfo) = 0; /** - * @brief 执行创建快照任务并更新progress - * 第一步,构建快照文件映射, put MateObj - * 第二步,从curvefs读取chunk文件,并put DataObj - * 第三步,删除curvefs中的临时快照 - * 第四步,update status + * @brief Execute the task of creating a snapshot and update the progress + * Step 1, build a snapshot file mapping and put MateObj + * Step 2, read the chunk file from curvefs and put DataObj + * Step 3, delete the temporary snapshot in curves + * Step 4, update status * - * @param task 快照任务信息 + * @param task snapshot task information */ virtual void HandleCreateSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 删除快照前置操作 - * 更新数据库中的快照记录为deleting状态 + * @brief Delete snapshot pre operation + * Update the snapshot records in the database to a deleting state * - * @param uuid 快照uuid - * @param user 用户名 - * @param fileName 文件名 - * @param[out] snapInfo 快照信息 + * @param uuid Snapshot uuid + * @param user username + * @param fileName File name + * @param[out] snapInfo snapshot information * - * @return 错误码 + * @return error code */ virtual int DeleteSnapshotPre( UUID uuid, @@ -123,30 +123,30 @@ class SnapshotCore { SnapshotInfo *snapInfo) = 0; /** - * @brief 执行删除快照任务并更新progress + * @brief Execute the delete snapshot task and update the progress * - * @param task 快照任务信息 + * @param task snapshot task information */ virtual void HandleDeleteSnapshotTask( std::shared_ptr task) = 0; /** - * @brief 获取文件的快照信息 + * @brief Get snapshot information of files * - * @param file 文件名 - * @param info 快照信息列表 + * @param file file name + * @param info snapshot information list * - * @return 错误码 + * @return error code */ virtual int GetFileSnapshotInfo(const std::string &file, std::vector *info) = 0; /** - * @brief 获取全部快照信息 + * @brief Get all snapshot information * - * @param list 快照信息列表 + * @param list snapshot information list * - * @return 错误码 + * @return error code */ virtual int GetSnapshotList(std::vector *list) = 0; @@ -171,11 +171,11 @@ class SnapshotCore { class SnapshotCoreImpl : public SnapshotCore { public: /** - * @brief 构造函数 + * @brief constructor * - * @param client curve客户端对象 - * @param metaStore meta存储对象 - * @param dataStore 
data存储对象 + * @param client curve client object + * @param metaStore MetaStorage Object + * @param dataStore data storage object */ SnapshotCoreImpl( std::shared_ptr client, @@ -206,7 +206,7 @@ class SnapshotCoreImpl : public SnapshotCore { threadPool_->Stop(); } - // 公有接口定义见SnapshotCore接口注释 + // Public interface definition can be found in the SnapshotCore interface annotation int CreateSnapshotPre(const std::string &file, const std::string &user, const std::string &snapshotName, @@ -239,38 +239,38 @@ class SnapshotCoreImpl : public SnapshotCore { private: /** - * @brief 构建快照文件映射 + * @brief Build snapshot file mapping * - * @param fileName 文件名 - * @param seqNum 快照版本号 - * @param fileSnapshotMap 快照文件映射表 + * @param fileName File name + * @param seqNum snapshot version number + * @param fileSnapshotMap snapshot file mapping table * - * @return 错误码 + * @return error code */ int BuildSnapshotMap(const std::string &fileName, uint64_t seqNum, FileSnapMap *fileSnapshotMap); /** - * @brief 构建Segment信息 + * @brief Build Segment Information * - * @param info 快照信息 - * @param segInfos Segment信息表 + * @param info snapshot information + * @param segInfos Segment Information Table * - * @return 错误码 + * @return error code */ int BuildSegmentInfo( const SnapshotInfo &info, std::map *segInfos); /** - * @brief 在curvefs上创建快照 + * @brief Create a snapshot on curves * - * @param fileName 文件名 - * @param info 快照信息 - * @param task 快照任务信息 + * @param fileName File name + * @param info snapshot information + * @param task snapshot task information * - * @return 错误码 + * @return error code */ int CreateSnapshotOnCurvefs( const std::string &fileName, @@ -278,23 +278,23 @@ class SnapshotCoreImpl : public SnapshotCore { std::shared_ptr task); /** - * @brief 删除curvefs上的快照 + * @brief Delete snapshot on curves * - * @param info 快照信息 + * @param info snapshot information * - * @return 错误码 + * @return error code */ int DeleteSnapshotOnCurvefs(const SnapshotInfo &info); /** - * @brief 构建索引块 + * @brief Build Index Block * - * @param info 快照信息 - * @param[out] indexData 索引块 - * @param[out] segInfos Segment信息 - * @param task 快照任务信息 + * @param info snapshot information + * @param[out] indexData index block + * @param[out] segInfos Segment Information + * @param task snapshot task information * - * @return 错误码 + * @return error code */ int BuildChunkIndexData( const SnapshotInfo &info, @@ -306,15 +306,15 @@ class SnapshotCoreImpl : public SnapshotCore { std::function; /** - * @brief 转储快照过程 + * @brief Dump snapshot process * - * @param indexData 索引块 - * @param info 快照信息 - * @param segInfos Segment信息 - * @param filter 转储数据块过滤器 - * @param task 快照任务信息 + * @param indexData index block + * @param info snapshot information + * @param segInfos Segment Information + * @param filter Dump data block filter + * @param task snapshot task information * - * @return 错误码 + * @return error code */ int TransferSnapshotData( const ChunkIndexData indexData, @@ -324,21 +324,21 @@ class SnapshotCoreImpl : public SnapshotCore { std::shared_ptr task); /** - * @brief 开始cancel,更新任务状态,更新数据库状态 + * @brief Start cancel, update task status, update database status * - * @param task 快照任务信息 + * @param task snapshot task information * - * @return 错误码 + * @return error code */ int StartCancel( std::shared_ptr task); /** - * @brief 转储数据之后取消快照过程 + * @brief: Cancel the snapshot process after dumping data * - * @param task 快照任务信息 - * @param indexData 索引块 - * @param fileSnapshotMap 快照文件映射表 + * @param task snapshot task information + * @param indexData index block + * @param 
fileSnapshotMap snapshot file mapping table */ void CancelAfterTransferSnapshotData( std::shared_ptr task, @@ -346,94 +346,94 @@ class SnapshotCoreImpl : public SnapshotCore { const FileSnapMap &fileSnapshotMap); /** - * @brief 创建索引块之后取消快照过程 + * @brief Cancel the snapshot process after creating the index block * - * @param task 快照任务信息 + * @param task snapshot task information */ void CancelAfterCreateChunkIndexData( std::shared_ptr task); /** - * @brief 在curvefs上创建快照之后取消快照过程 + * @brief: Cancel the snapshot process after creating a snapshot on curves * - * @param task 快照任务信息 + * @param task snapshot task information */ void CancelAfterCreateSnapshotOnCurvefs( std::shared_ptr task); /** - * @brief 在Mate数据存储在删除快照 + * @brief in Mate data storage, delete snapshot * - * @param task 快照任务信息 + * @param task snapshot task information */ void HandleClearSnapshotOnMateStore( std::shared_ptr task); /** - * @brief 处理创建快照任务成功 + * @brief successfully processed the snapshot creation task * - * @param task 快照任务信息 + * @param task snapshot task information */ void HandleCreateSnapshotSuccess( std::shared_ptr task); /** - * @brief 处理创建快照任务失败过程 + * @brief processing failed snapshot creation task process * - * @param task 快照任务信息 + * @param task snapshot task information */ void HandleCreateSnapshotError( std::shared_ptr task); /** - * @brief 处理删除快照任务失败过程 + * @brief failed to process the delete snapshot task process * - * @param task 快照任务信息 + * @param task snapshot task information */ void HandleDeleteSnapshotError( std::shared_ptr task); /** - * @brief 创建快照前尝试清理失败的快照,否则可能会再次失败 + * @brief Attempt to clean up failed snapshots before creating them, otherwise they may fail again * - * @param task 快照任务信息 - * @return 错误码 + * @param task snapshot task information + * @return error code */ int ClearErrorSnapBeforeCreateSnapshot( std::shared_ptr task); private: - // curvefs客户端对象 + // Curvefs client object std::shared_ptr client_; - // meta数据存储 + // Meta Data Storage std::shared_ptr metaStore_; - // data数据存储 + // Data storage std::shared_ptr dataStore_; - // 快照引用计数管理模块 + // Snapshot Reference Count Management Module std::shared_ptr snapshotRef_; - // 执行并发步骤的线程池 + // Thread pool for executing concurrent steps std::shared_ptr threadPool_; - // 锁住打快照的文件名,防止并发同时对其打快照,同一文件的快照需排队 + // Lock the file name of the snapshot to prevent concurrent snapshots. 
Snapshots of the same file need to be queued NameLock snapshotNameLock_; - // 转储chunk分片大小 + // Dump chunk shard size uint64_t chunkSplitSize_; - // CheckSnapShotStatus调用间隔 + // CheckSnapShotStatus call interval uint32_t checkSnapshotStatusIntervalMs_; - // 最大快照数 + // Maximum Snapshots uint32_t maxSnapshotLimit_; - // 线程数 + // Number of threads uint32_t snapshotCoreThreadNum_; - // session超时时间 + // Session timeout uint32_t mdsSessionTimeUs_; - // client异步回调请求的重试总时间 + // Total retry time for client asynchronous callback requests uint64_t clientAsyncMethodRetryTimeSec_; - // 调用client异步方法重试时间间隔 + // Call client asynchronous method retry interval uint64_t clientAsyncMethodRetryIntervalMs_; - // 异步ReadChunkSnapshot的并发数 + // The concurrency of asynchronous ReadChunkSnapshots uint32_t readChunkSnapshotConcurrency_; }; diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp index 8401af3b82..bcd5b16b78 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.cpp @@ -28,7 +28,7 @@ namespace curve { namespace snapshotcloneserver { bool ToChunkDataName(const std::string &name, ChunkDataName *cName) { - // 逆向解析string,以支持文件名具有分隔字符的情况 + // Reverse parsing of strings to support cases where file names have separator characters std::string::size_type pos = name.find_last_of(kChunkDataNameSeprator); std::string::size_type lastPos = std::string::npos; @@ -65,7 +65,7 @@ bool ChunkIndexData::Serialize(std::string *data) const { ChunkDataName(fileName_, m.second, m.first). ToDataChunkKey()}); } - // Todo:可以转化为stream给adpater接口使用SerializeToOstream + // Todo: Can be converted into a stream for the adpater interface to use SerializeToOstream return map.SerializeToString(data); } diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store.h b/src/snapshotcloneserver/snapshot/snapshot_data_store.h index ae88b7694b..8802e7f287 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_data_store.h +++ b/src/snapshotcloneserver/snapshot/snapshot_data_store.h @@ -57,8 +57,8 @@ class ChunkDataName { chunkSeqNum_(seq), chunkIndex_(chunkIndex) {} /** - * 构建datachunk对象的名称 文件名-chunk索引-版本号 - * @return: 对象名称字符串 + * Build the name of the datachunk object File name Chunk index Version number + * @return: Object name string */ std::string ToDataChunkKey() const { return fileName_ @@ -80,13 +80,13 @@ inline bool operator==(const ChunkDataName &lhs, const ChunkDataName &rhs) { } /** - * @brief 根据对象名称解析生成chunkdataname对象 + * @brief Generate chunkdataname object based on object name parsing * - * @param name 对象名 - * @param[out] cName chunkDataName对象 + * @param name Object name + * @param[out] cName chunkDataName object * - * @retVal true 成功 - * @retVal false 失败 + * @retval true succeeded + * @retval false failed */ bool ToChunkDataName(const std::string &name, ChunkDataName *cName); @@ -100,8 +100,8 @@ class ChunkIndexDataName { fileSeqNum_ = seq; } /** - * 构建索引chunk的名称 文件名+文件版本号 - * @return: 索引chunk的名称字符串 + * Build the name of the index chunk file name+file version number + * @return: The name string of the index chunk */ std::string ToIndexDataChunkKey() const { return this->fileName_ @@ -109,9 +109,9 @@ class ChunkIndexDataName { + std::to_string(this->fileSeqNum_); } - // 文件名 + // File name std::string fileName_; - // 文件版本号 + // File version number SnapshotSeqType fileSeqNum_; }; @@ -119,16 +119,16 @@ class ChunkIndexData { public: ChunkIndexData() {} /** - * 索引chunk数据序列化(使用protobuf实现) - 
* @param 保存序列化后数据的指针 - * @return: true 序列化成功/ false 序列化失败 + * Index chunk data serialization (implemented using protobuf) + * @param data Saves a pointer to serialized data + * @return: true Serialization succeeded/false Serialization failed */ bool Serialize(std::string *data) const; /** - * 反序列化索引chunk的数据到map中 - * @param 索引chunk存储的数据 - * @return: true 反序列化成功/ false 反序列化失败 + * Deserialize the data of the index chunk into the map + * @param data The data stored in the index chunk + * @return: true Deserialization succeeded/false Deserialization failed */ bool Unserialize(const std::string &data); @@ -151,9 +151,9 @@ class ChunkIndexData { } private: - // 文件名 + // File name std::string fileName_; - // 快照文件索引信息map + // Snapshot file index information map std::map chunkMap_; }; @@ -190,83 +190,83 @@ class SnapshotDataStore { SnapshotDataStore() {} virtual ~SnapshotDataStore() {} /** - * 快照的datastore初始化,根据存储的类型有不同的实现 - * @param s3配置文件路径 - * @return 0 初始化成功/ -1 初始化失败 + * The datastore initialization of snapshots can be implemented differently depending on the type of storage + * @param s3 configuration file path + * @return 0 initialization successful/-1 initialization failed */ virtual int Init(const std::string &confpath) = 0; /** - * 存储快照文件的元数据信息到datastore中 - * @param 元数据对象名 - * @param 元数据对象的数据内容 - * @return 0 保存成功/ -1 保存失败 + * Store the metadata information of the snapshot file in the datastore + * @param name Metadata object name + * @param The data content of the metadata object + * @return 0 saved successfully/-1 failed to save */ virtual int PutChunkIndexData(const ChunkIndexDataName &name, const ChunkIndexData &meta) = 0; /** - * 获取快照文件的元数据信息 - * @param 元数据对象名 - * @param 保存元数据数据内容的指针 - * return: 0 获取成功/ -1 获取失败 + * Obtain metadata information for snapshot files + * @param name Metadata object name + * @param Pointer to save metadata data content + * @return: 0 successfully obtained/-1 failed to obtain */ virtual int GetChunkIndexData(const ChunkIndexDataName &name, ChunkIndexData *meta) = 0; /** - * 删除快照文件的元数据 - * @param 元数据对象名 - * @return: 0 删除成功/ -1 删除失败 + * Delete metadata for snapshot files + * @param name Metadata object name + * @return: 0 successfully deleted/-1 failed to delete */ virtual int DeleteChunkIndexData(const ChunkIndexDataName &name) = 0; - // 快照元数据chunk是否存在 + // Does the snapshot metadata chunk exist /** - * 判断快照元数据是否存在 - * @param 元数据对象名 - * @return: true 存在/ false 不存在 + * Determine whether snapshot metadata exists + * @param name Metadata object name + * @return: true exists/false does not exist */ virtual bool ChunkIndexDataExist(const ChunkIndexDataName &name) = 0; /* - // 存储快照文件的数据信息到datastore + // Store the data information of the snapshot file in the datastore virtual int PutChunkData(const ChunkDataName &name, const ChunkData &data) = 0; - // 读取快照文件的数据信息 + // Reading data information from snapshot files virtual int GetChunkData(const ChunkDataName &name, ChunkData *data) = 0; */ /** - * 删除快照的数据chunk - * @param 数据chunk名 - * @return: 0 删除成功/ -1 删除失败 + * Delete the data chunk of the snapshot + * @param name chunk data name + * @return: 0 successfully deleted/-1 failed to delete */ virtual int DeleteChunkData(const ChunkDataName &name) = 0; /** - * 判断快照的数据chunk是否存在 - * @param 数据chunk名称 - * @return: true 存在/ false 不存在 + * Determine whether the data chunk of the snapshot exists + * @param name chunk data name + * @return: true exists/false does not exist */ virtual bool ChunkDataExist(const ChunkDataName &name) = 0; - // 设置快照转储完成标志 + // Set snapshot dump completion 
flag /* virtual int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) = 0; - // 获取快照转储完成标志 + // Get snapshot dump completion flag virtual int GetSnapshotFlag(const ChunkIndexDataName &name) = 0; */ /** - * 初始化数据库chunk的分片转储任务 - * @param 数据chunk名称 - * @param 管理转储任务的指针 - * @return 0 任务初始化成功/ -1 任务初始化失败 + * Initialize the sharded dump task of the database chunk + * @param name chunk data name + * @param task Pointer to management dump task + * @return 0 Task initialization successful/-1 Task initialization failed */ virtual int DataChunkTranferInit(const ChunkDataName &name, std::shared_ptr task) = 0; /** - * 添加数据chunk的一个分片到转储任务中 - * @param 数据chunk名 - * @转储任务 - * @第几个分片 - * @分片大小 - * @分片的数据内容 - * @return: 0 添加成功/ -1 添加失败 + * Add a shard of data chunk to a dumping task. + * @param name chunk name + * @param task Dumping task + * @param partNum Index of the shard + * @param partSize Shard size + * @param buf Shard data content + * @return: 0 for successful addition / -1 for failure to add */ virtual int DataChunkTranferAddPart(const ChunkDataName &name, std::shared_ptr task, @@ -274,18 +274,18 @@ class SnapshotDataStore { int partSize, const char* buf) = 0; /** - * 完成数据chunk的转储任务 - * @param 数据chunk名 - * @param 转储任务管理结构 - * @return: 0 转储任务完成/ 转储任务失败 -1 + *Complete the dump task of data chunks + * @param name chunk data name + * @param task Dump Task Management Structure + * @return: 0 Dump task completed/Dump task failed -1 */ virtual int DataChunkTranferComplete(const ChunkDataName &name, std::shared_ptr task) = 0; /** - * 终止数据chunk的分片转储任务 - * @param 数据chunk名 - * @param 转储任务管理结构 - * @return: 0 任务终止成功/ -1 任务终止失败 + *Terminate the sharded dump task of data chunks + * @param name chunk data name + * @param task Dump Task Management Structure + * @return: 0 mission terminated successfully/-1 mission terminated failed */ virtual int DataChunkTranferAbort(const ChunkDataName &name, std::shared_ptr task) = 0; diff --git a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h index d1324243e4..f49bafc46a 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h +++ b/src/snapshotcloneserver/snapshot/snapshot_data_store_s3.h @@ -55,7 +55,7 @@ class S3SnapshotDataStore : public SnapshotDataStore { // ChunkData *data) override; int DeleteChunkData(const ChunkDataName &name) override; bool ChunkDataExist(const ChunkDataName &name) override; -/* nos暂时不支持,后续增加 +/* NOS is currently not supported, but will be added in the future int SetSnapshotFlag(const ChunkIndexDataName &name, int flag) override; int GetSnapshotFlag(const ChunkIndexDataName &name) override; */ diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp index 747b666350..7ce2a5db08 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.cpp @@ -50,7 +50,7 @@ int SnapshotServiceManager::CreateSnapshot(const std::string &file, int ret = core_->CreateSnapshotPre(file, user, snapshotName, &snapInfo); if (ret < 0) { if (kErrCodeTaskExist == ret) { - // 任务已存在的情况下返回成功,使接口幂等 + // Returns success if the task already exists, making the interface idempotent *uuid = snapInfo.GetUuid(); return kErrCodeSuccess; } @@ -124,10 +124,10 @@ int SnapshotServiceManager::DeleteSnapshot( if (kErrCodeTaskExist == ret) { return kErrCodeSuccess; } else if (kErrCodeSnapshotCannotDeleteUnfinished == ret) { - // 转Cancel + // 
Transfer to Cancel ret = CancelSnapshot(uuid, user, file); if (kErrCodeCannotCancelFinished == ret) { - // 防止这一过程中又执行完了 + // To prevent the execution from completing again during this process ret = core_->DeleteSnapshotPre(uuid, user, file, &snapInfo); if (ret < 0) { LOG(ERROR) << "DeleteSnapshotPre fail" @@ -228,7 +228,7 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( info->emplace_back(snap, task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { @@ -248,7 +248,7 @@ int SnapshotServiceManager::GetFileSnapshotInfoInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is possible to enter here return kErrCodeInternalError; } } @@ -319,7 +319,7 @@ int SnapshotServiceManager::GetSnapshotListInner( info->emplace_back(snap, task->GetTaskInfo()->GetProgress()); } else { - // 刚刚完成 + // Just completed SnapshotInfo newInfo; ret = core_->GetSnapshotInfo(uuid, &newInfo); if (ret < 0) { @@ -339,7 +339,7 @@ int SnapshotServiceManager::GetSnapshotListInner( } default: LOG(ERROR) << "can not reach here!"; - // 当更新数据库失败时,有可能进入这里 + // When updating the database fails, it is possible to enter here return kErrCodeInternalError; } } @@ -398,7 +398,7 @@ int SnapshotServiceManager::RecoverSnapshotTask() { } break; } - // 重启恢复的canceling等价于errorDeleting + // canceling restart recovery is equivalent to errorDeleting case Status::canceling : case Status::deleting : case Status::errorDeleting : { diff --git a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h index 1aa7143e9f..c37e63fca7 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_service_manager.h +++ b/src/snapshotcloneserver/snapshot/snapshot_service_manager.h @@ -38,17 +38,17 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 文件单个快照信息 + * @brief file single snapshot information */ class FileSnapshotInfo { public: FileSnapshotInfo() = default; /** - * @brief 构造函数 + * @brief constructor * - * @param snapInfo 快照信息 - * @param snapProgress 快照完成度百分比 + * @param snapInfo snapshot information + * @param snapProgress snapshot completion percentage */ FileSnapshotInfo(const SnapshotInfo &snapInfo, uint32_t snapProgress) @@ -101,9 +101,9 @@ class FileSnapshotInfo { } private: - // 快照信息 + // Snapshot Information SnapshotInfo snapInfo_; - // 快照处理进度百分比 + // Snapshot processing progress percentage uint32_t snapProgress_; }; @@ -151,10 +151,10 @@ class SnapshotFilterCondition { class SnapshotServiceManager { public: /** - * @brief 构造函数 + * @brief constructor * - * @param taskMgr 快照任务管理类对象 - * @param core 快照核心模块 + * @param taskMgr snapshot task management class object + * @param core snapshot core module */ SnapshotServiceManager( std::shared_ptr taskMgr, @@ -165,34 +165,34 @@ class SnapshotServiceManager { virtual ~SnapshotServiceManager() {} /** - * @brief 初始化 + * @brief initialization * - * @return 错误码 + * @return error code */ virtual int Init(const SnapshotCloneServerOptions &option); /** - * @brief 启动服务 + * @brief Start Service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ virtual void Stop(); /** - * @brief 创建快照服务 + * @brief Create snapshot service * - * @param file 文件名 - * @param user 文件所属用户 - * @param snapshotName 快照名 - * @param uuid 快照uuid + * @param file file name + * @param user The user to whom the file belongs + * @param 
snapshotName SnapshotName + * @param uuid Snapshot uuid * - * @return 错误码 + * @return error code */ virtual int CreateSnapshot(const std::string &file, const std::string &user, @@ -200,53 +200,53 @@ class SnapshotServiceManager { UUID *uuid); /** - * @brief 删除快照服务 + * @brief Delete snapshot service * - * @param uuid 快照uuid - * @param user 快照文件的用户 - * @param file 快照所属文件的文件名 + * @param uuid Snapshot uuid + * @param user The user of the snapshot file + * @param file The file name of the file to which the snapshot belongs * - * @return 错误码 + * @return error code */ virtual int DeleteSnapshot(const UUID &uuid, const std::string &user, const std::string &file); /** - * @brief 取消快照服务 + * @brief Cancel snapshot service * - * @param uuid 快照的uuid - * @param user 快照的用户 - * @param file 快照所属文件的文件名 + * @param uuid The uuid of the snapshot + * @param user snapshot user + * @param file The file name of the file to which the snapshot belongs * - * @return 错误码 + * @return error code */ virtual int CancelSnapshot(const UUID &uuid, const std::string &user, const std::string &file); /** - * @brief 获取文件的快照信息服务接口 + * @brief Gets the snapshot information service interface for files * - * @param file 文件名 - * @param user 用户名 - * @param info 快照信息列表 + * @param file file name + * @param user username + * @param info snapshot information list * - * @return 错误码 + * @return error code */ virtual int GetFileSnapshotInfo(const std::string &file, const std::string &user, std::vector *info); /** - * @brief 根据Id获取文件的快照信息 + * @brief Obtain snapshot information of the file based on the ID * - * @param file 文件名 - * @param user 用户名 - * @param uuid 快照Id - * @param info 快照信息列表 + * @param file file name + * @param user username + * @param uuid SnapshotId + * @param info snapshot information list * - * @return 错误码 + * @return error code */ virtual int GetFileSnapshotInfoById(const std::string &file, const std::string &user, @@ -254,32 +254,32 @@ class SnapshotServiceManager { std::vector *info); /** - * @brief 获取快照列表 + * @brief Get snapshot list * - * @param filter 过滤条件 - * @param info 快照信息列表 + * @param filter filtering conditions + * @param info snapshot information list * - * @return 错误码 + * @return error code */ virtual int GetSnapshotListByFilter(const SnapshotFilterCondition &filter, std::vector *info); /** - * @brief 恢复快照任务接口 + * @brief Restore Snapshot Task Interface * - * @return 错误码 + * @return error code */ virtual int RecoverSnapshotTask(); private: /** - * @brief 根据快照信息获取快照任务信息 + * @brief Obtain snapshot task information based on snapshot information * - * @param snapInfos 快照信息 - * @param user 用户名 - * @param[out] info 快照任务信息 + * @param snapInfos snapshot information + * @param user username + * @param[out] info snapshot task information * - * @return 错误码 + * @return error code */ int GetFileSnapshotInfoInner( std::vector snapInfos, @@ -287,13 +287,13 @@ class SnapshotServiceManager { std::vector *info); /** - * @brief 根据快照信息获取快照任务信息 + * @brief Obtain snapshot task information based on snapshot information * - * @param snapInfos 快照信息 - * @param filter 过滤条件 - * @param[out] info 快照任务信息 + * @param snapInfos snapshot information + * @param filter filtering conditions + * @param[out] info snapshot task information * - * @return 错误码 + * @return error code */ int GetSnapshotListInner( std::vector snapInfos, @@ -301,9 +301,9 @@ class SnapshotServiceManager { std::vector *info); private: - // 快照任务管理类对象 + // Snapshot Task Management Class Object std::shared_ptr taskMgr_; - // 快照核心模块 + // Snapshot Core Module std::shared_ptr core_; 
}; diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.cpp b/src/snapshotcloneserver/snapshot/snapshot_task.cpp index 179f2b4617..e0dc8648ca 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_task.cpp @@ -46,18 +46,18 @@ void ReadChunkSnapshotClosure::Run() { } /** - * @brief 转储快照的单个chunk + * @brief Dump a single chunk of a snapshot * @detail - * 由于单个chunk过大,chunk转储分片进行,分片大小为chunkSplitSize_, - * 步骤如下: - * 1. 创建一个转储任务transferTask,并调用DataChunkTranferInit初始化 - * 2. 调用ReadChunkSnapshot从curvefs读取chunk的一个分片 - * 3. 调用DataChunkTranferAddPart转储一个分片 - * 4. 重复2、3直到所有分片转储完成,调用DataChunkTranferComplete结束转储任务 - * 5. 中间如有读取或转储发生错误,则调用DataChunkTranferAbort放弃转储, - * 并返回错误码 + * Since a single chunk is too large, chunk dumping is done in segments, with each segment size being chunkSplitSize_. + * The steps are as follows: + * 1. Create a dump task transferTask and initialize it using DataChunkTransferInit. + * 2. Call ReadChunkSnapshot to read a segment of the chunk from CurveFS. + * 3. Call DataChunkTransferAddPart to dump a segment. + * 4. Repeat steps 2 and 3 until all segments have been dumped, and then call DataChunkTransferComplete to end the dump task. + * 5. If there are any errors during reading or dumping in the process, call DataChunkTransferAbort to abandon the dump + * and return an error code. * - * @return 错误码 + * @return Error code */ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { ChunkDataName name = taskInfo_->name_; @@ -113,7 +113,7 @@ int TransferSnapshotDataChunkTask::TransferSnapshotDataChunk() { std::list results = tracker->PopResultContexts(); if (0 == results.size()) { - // 已经完成,没有新的结果了 + // Completed, no new results break; } ret = HandleReadChunkSnapshotResultsAndRetry( diff --git a/src/snapshotcloneserver/snapshot/snapshot_task.h b/src/snapshotcloneserver/snapshot/snapshot_task.h index bf53993a61..5469ae15e7 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task.h +++ b/src/snapshotcloneserver/snapshot/snapshot_task.h @@ -38,14 +38,14 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 快照任务信息 + * @brief snapshot task information */ class SnapshotTaskInfo : public TaskInfo { public: /** - * @brief 构造函数 + * @brief constructor * - * @param snapInfo 快照信息 + * @param snapInfo snapshot information */ explicit SnapshotTaskInfo(const SnapshotInfo &snapInfo, std::shared_ptr metric) @@ -54,27 +54,27 @@ class SnapshotTaskInfo : public TaskInfo { metric_(metric) {} /** - * @brief 获取快照信息 + * @brief Get snapshot information * - * @return 快照信息 + * @return snapshot information */ SnapshotInfo& GetSnapshotInfo() { return snapshotInfo_; } /** - * @brief 获取快照uuid + * @brief Get snapshot uuid * - * @return 快照uuid + * @return snapshot uuid */ UUID GetUuid() const { return snapshotInfo_.GetUuid(); } /** - * @brief 获取文件名 + * @brief Get file name * - * @return 文件名 + * @return file name */ std::string GetFileName() const { return snapshotInfo_.GetFileName(); @@ -85,9 +85,9 @@ class SnapshotTaskInfo : public TaskInfo { } private: - // 快照信息 + // Snapshot Information SnapshotInfo snapshotInfo_; - // metric 信息 + // Metric Information std::shared_ptr metric_; }; @@ -95,10 +95,10 @@ class SnapshotTaskInfo : public TaskInfo { class SnapshotTask : public Task { public: /** - * @brief 构造函数 + * @brief constructor * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information */ SnapshotTask(const TaskIdType &taskId, std::shared_ptr taskInfo, 
@@ -108,32 +108,32 @@ class SnapshotTask : public Task { core_(core) {} /** - * @brief 获取快照任务信息对象指针 + * @brief Get snapshot task information object pointer * - * @return 快照任务信息对象指针 + * @return Snapshot task information object pointer */ std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: - // 快照任务信息 + // Snapshot Task Information std::shared_ptr taskInfo_; - // 快照核心逻辑对象 + // Snapshot Core Logical Object std::shared_ptr core_; }; /** - * @brief 创建快照任务 + * @brief Create snapshot task */ class SnapshotCreateTask : public SnapshotTask { public: /** - * @brief 构造函数 + * @brief constructor * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - * @param core 快照核心逻辑对象 + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + * @param core snapshot core logical object */ SnapshotCreateTask(const TaskIdType &taskId, std::shared_ptr taskInfo, @@ -141,7 +141,7 @@ class SnapshotCreateTask : public SnapshotTask { : SnapshotTask(taskId, taskInfo, core) {} /** - * @brief 快照执行函数 + * @brief snapshot execution function */ void Run() override { core_->HandleCreateSnapshotTask(taskInfo_); @@ -149,16 +149,16 @@ class SnapshotCreateTask : public SnapshotTask { }; /** - * @brief 删除快照任务 + * @brief Delete snapshot task */ class SnapshotDeleteTask : public SnapshotTask { public: /** - * @brief 构造函数 + * @brief constructor * - * @param taskId 快照任务id - * @param taskInfo 快照任务信息 - * @param core 快照核心逻辑对象 + * @param taskId Snapshot task ID + * @param taskInfo snapshot task information + * @param core snapshot core logical object */ SnapshotDeleteTask(const TaskIdType &taskId, std::shared_ptr taskInfo, @@ -166,7 +166,7 @@ class SnapshotDeleteTask : public SnapshotTask { : SnapshotTask(taskId, taskInfo, core) {} /** - * @brief 快照执行函数 + * @brief snapshot execution function */ void Run() override { core_->HandleDeleteSnapshotTask(taskInfo_); @@ -174,21 +174,21 @@ class SnapshotDeleteTask : public SnapshotTask { }; struct ReadChunkSnapshotContext { - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; // seq uint64_t seqNum; - // 分片的索引 + // Fragmented index uint64_t partIndex; - // 分片的buffer + // Sliced buffer std::unique_ptr buf; - // 分片长度 + // Slice length uint64_t len; - // 返回值 + // Return value int retCode; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; @@ -255,32 +255,32 @@ class TransferSnapshotDataChunkTask : public TrackerTask { private: /** - * @brief 转储快照单个chunk + * @brief Dump snapshot single chunk * - * @return 错误码 + * @return error code */ int TransferSnapshotDataChunk(); /** - * @brief 开始异步ReadSnapshotChunk + * @brief Start asynchronous ReadSnapshotChunk * - * @param tracker 异步ReadSnapshotChunk追踪器 - * @param context ReadSnapshotChunk上下文 + * @param tracker asynchronous ReadSnapshotChunk tracker + * @param context ReadSnapshotChunk context * - * @return 错误码 + * @return error code */ int StartAsyncReadChunkSnapshot( std::shared_ptr tracker, std::shared_ptr context); /** - * @brief 处理ReadChunkSnapshot的结果并重试 + * @brief Process the results of ReadChunkSnapshot and try again * - * @param tracker 异步ReadSnapshotChunk追踪器 - * @param transferTask 转储任务 - * @param results ReadChunkSnapshot结果列表 + * @param tracker asynchronous ReadSnapshotChunk tracker + * @param transferTask Dump Task + * @param results ReadChunkSnapshot result list * - * @return 错误码 + * @return error code */ int HandleReadChunkSnapshotResultsAndRetry( std::shared_ptr tracker, diff 
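For reference, the shard-by-shard dump sequence described in the comments above boils down to a loop like the following. This is only a rough sketch: the data store and chunk name follow the SnapshotDataStore interface shown earlier, the task type (written here as TransferTask) and the helper ReadChunkSnapshotPart() are illustrative stand-ins, and error handling is reduced to the minimum.

int DumpOneChunk(SnapshotDataStore* store, const ChunkDataName& name,
                 uint64_t chunkSize, uint64_t chunkSplitSize) {
    auto task = std::make_shared<TransferTask>();
    // Step 1: initialize the dump task for this chunk.
    if (store->DataChunkTranferInit(name, task) != 0) {
        return -1;
    }
    std::vector<char> buf(chunkSplitSize);
    uint64_t partNum = chunkSize / chunkSplitSize;
    for (uint64_t partIndex = 0; partIndex < partNum; ++partIndex) {
        // Step 2: read one shard of the chunk from curvefs (hypothetical helper).
        // Step 3: append the shard to the dump task.
        if (ReadChunkSnapshotPart(name, partIndex, chunkSplitSize, buf.data()) != 0 ||
            store->DataChunkTranferAddPart(name, task, partIndex,
                                           chunkSplitSize, buf.data()) != 0) {
            // Step 5: on any read or dump error, abandon the whole transfer.
            store->DataChunkTranferAbort(name, task);
            return -1;
        }
    }
    // Step 4: all shards dumped, finish the task.
    return store->DataChunkTranferComplete(name, task);
}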
--git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp index aa57505b9f..67c80c57e7 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp +++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.cpp @@ -39,7 +39,7 @@ int SnapshotTaskManager::Start() { return ret; } isStop_.store(false); - // isStop_标志先置,防止backEndThread先退出 + // isStop_ Flag set first to prevent backEndThread from exiting first backEndThread = std::thread(&SnapshotTaskManager::BackEndThreadFunc, this); } @@ -58,7 +58,7 @@ int SnapshotTaskManager::PushTask(std::shared_ptr task) { if (isStop_.load()) { return kErrCodeServiceIsStop; } - // 移除实际已完成的task,防止uuid冲突 + // Remove actual completed tasks to prevent uuid conflicts ScanWorkingTask(); { @@ -73,7 +73,7 @@ int SnapshotTaskManager::PushTask(std::shared_ptr task) { } snapshotMetric_->snapshotWaiting << 1; - // 立即执行task + // Execute task immediately ScanWaitingTask(); return kErrCodeSuccess; } @@ -90,7 +90,7 @@ std::shared_ptr SnapshotTaskManager::GetTask( int SnapshotTaskManager::CancelTask(const TaskIdType &taskId) { { - // 还在等待队列的Cancel直接移除 + // Waiting for the Cancel of the queue to be directly removed WriteLockGuard taskMapWlock(taskMapLock_); LockGuard waitingTasksLock(waitingTasksLock_); for (auto it = waitingTasks_.begin(); diff --git a/src/snapshotcloneserver/snapshot/snapshot_task_manager.h b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h index a22eb0e2ae..3cca4c99a7 100644 --- a/src/snapshotcloneserver/snapshot/snapshot_task_manager.h +++ b/src/snapshotcloneserver/snapshot/snapshot_task_manager.h @@ -47,12 +47,12 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 快照任务管理器类 + * @brief Snapshot Task Manager Class */ class SnapshotTaskManager { public: /** - * @brief 默认构造函数 + * @brief default constructor */ SnapshotTaskManager( std::shared_ptr core, @@ -63,7 +63,7 @@ class SnapshotTaskManager { snapshotTaskManagerScanIntervalMs_(0) {} /** - * @brief 析构函数 + * @brief destructor */ ~SnapshotTaskManager() { Stop(); @@ -78,88 +78,88 @@ class SnapshotTaskManager { } /** - * @brief 启动 + * @brief start * - * @return 错误码 + * @return error code */ int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ void Stop(); /** - * @brief 添加任务 + * @brief Add Task * - * @param task 快照任务 + * @param task snapshot task * - * @return 错误码 + * @return error code */ int PushTask(std::shared_ptr task); /** - * @brief 获取任务 + * @brief Get Task * - * @param taskId 任务id + * @param taskId Task ID * - * @return 快照任务指针 + * @return Snapshot Task Pointer */ std::shared_ptr GetTask(const TaskIdType &taskId) const; /** - * @brief 取消任务 + * @brief Cancel Task * - * @param taskId 任务id + * @param taskId Task ID * - * @return 错误码 + * @return error code */ int CancelTask(const TaskIdType &taskId); private: /** - * @brief 后台线程执行函数 + * @brief Background Thread Execution Function * - * 定期执行扫描等待队列函数与扫描工作队列函数。 + * Regularly execute the scan wait queue function and scan work queue function */ void BackEndThreadFunc(); /** - * @brief 扫描等待任务队列函数 + * @brief Scan Waiting Task Queue Function * - * 扫描等待队列,判断工作队列中当前文件 - * 是否有正在执行的快照,若没有则放入工作队列 + * Scan the waiting queue to determine the current file in the work queue + * Check if there is an active snapshot; if not, place it in the work queue * */ void ScanWaitingTask(); /** - * @brief 扫描工作队列函数 + * @brief Scan Work Queue Function * - * 扫描工作队列,判断工作队列中当前 - * 快照任务是否已完成,若完成则移出工作队列 + * Scan the work queue to determine the current status in the work queue + 
* Check if the snapshot task has been completed; if completed, remove it from the work queue * */ void ScanWorkingTask(); private: - // 后台线程 + // Background thread std::thread backEndThread; - // id->快照任务表 + // Mapping from task id to snapshot task std::map > taskMap_; mutable RWLock taskMapLock_; - // 快照等待队列 + // Snapshot waiting queue std::list > waitingTasks_; mutable Mutex waitingTasksLock_; - // 快照工作队列,实际是个map,其中key是文件名,以便于查询 + // The snapshot working queue is actually a map keyed by file name for easy lookup std::map > workingTasks_; mutable Mutex workingTasksLock_; std::shared_ptr threadpool_; - // 当前任务管理是否停止,用于支持start,stop功能 + // Whether the task manager is stopped, used to support the start and stop functions std::atomic_bool isStop_; // snapshot core @@ -168,7 +168,7 @@ class SnapshotTaskManager { // metric std::shared_ptr snapshotMetric_; - // 快照后台线程扫描等待队列和工作队列的扫描周期(单位:ms) + // Interval at which the snapshot background thread scans the waiting queue and working queue (unit: ms) int snapshotTaskManagerScanIntervalMs_; }; diff --git a/src/snapshotcloneserver/snapshotclone_server.cpp b/src/snapshotcloneserver/snapshotclone_server.cpp index 91a6c199e7..84e3fe95ed 100644 --- a/src/snapshotcloneserver/snapshotclone_server.cpp +++ b/src/snapshotcloneserver/snapshotclone_server.cpp @@ -320,7 +320,7 @@ bool SnapShotCloneServer::Init() { } bool SnapShotCloneServer::Start(void) { - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 + // Start the clone service before the snapshot service, because deleting a snapshot depends on whether it is still referenced by a clone int ret = cloneServiceManager_->Start(); if (ret < 0) { LOG(ERROR) << "cloneServiceManager start fail" diff --git a/src/snapshotcloneserver/snapshotclone_server.h b/src/snapshotcloneserver/snapshotclone_server.h index 47163ddac4..4d550e4b12 100644 --- a/src/snapshotcloneserver/snapshotclone_server.h +++ b/src/snapshotcloneserver/snapshotclone_server.h @@ -86,38 +86,38 @@ class SnapShotCloneServer { explicit SnapShotCloneServer(std::shared_ptr config) :conf_(config) {} /** - * @brief 通过配置初始化snapshotcloneserver所需要的所有配置 + * @brief Initialize all options required by snapshotcloneserver from the configuration */ void InitAllSnapshotCloneOptions(void); /** - * @brief leader选举,未选中持续等待,选中情况下建立watch并返回 + * @brief leader election, if not selected, continue to wait. 
If selected, establish a watch and return */ void StartCompaginLeader(void); /** - * @brief 启动dummyPort 用于检查主备snapshotserver - * 存活和各种config metric 和版本信息 + * @brief: Start dummyPort to check the active and standby snapshotserver + * Survival and various configuration metrics and version information */ void StartDummy(void); /** - * @brief 初始化clone与snapshot 各种核心结构 + * @brief initializes various core structures of clone and snapshot */ bool Init(void); /** - * @brief 启动各个组件的逻辑和线程池 + * @brief: Start the logic and thread pool of each component */ bool Start(void); /** - * @brief 停止所有服务 + * @brief Stop all services */ void Stop(void); /** - * @brief 启动RPC服务直到外部kill + * @brief Start RPC service until external kill */ void RunUntilQuit(void); @@ -127,9 +127,9 @@ class SnapShotCloneServer { private: std::shared_ptr conf_; SnapShotCloneServerOptions snapshotCloneServerOptions_; - // 标记自己为active/standby + // Mark yourself as active/standby bvar::Status status_; - // 与etcd交互的client + // Client interacting with ETCD std::shared_ptr etcdClient_; std::shared_ptr leaderElection_; diff --git a/src/snapshotcloneserver/snapshotclone_service.cpp b/src/snapshotcloneserver/snapshotclone_service.cpp index 72f6b04683..e706af1fb3 100644 --- a/src/snapshotcloneserver/snapshotclone_service.cpp +++ b/src/snapshotcloneserver/snapshotclone_service.cpp @@ -263,7 +263,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -271,7 +271,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotInfoAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -526,7 +526,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -534,7 +534,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTasksAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -684,7 +684,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -692,7 +692,7 @@ void SnapshotCloneServiceImpl::HandleGetFileSnapshotListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { @@ -784,7 +784,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( HandleBadRequestError(bcntl, requestId); return; } - // 默认值为10 + // Default value is 10 uint64_t limitNum = 10; if ((limit != nullptr) && !limit->empty()) { if (!curve::common::StringToUll(*limit, &limitNum)) { @@ -792,7 +792,7 @@ void SnapshotCloneServiceImpl::HandleGetCloneTaskListAction( return; } } - // 默认值为0 + // Default value is 0 uint64_t offsetNum = 0; if ((offset != nullptr) && !offset->empty()) { if (!curve::common::StringToUll(*offset, &offsetNum)) { diff --git 
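To make the waiting/working queue comments in snapshot_task_manager.h above more concrete, the background loop can be pictured roughly as below. Member and method names follow that header; the body and the exact order of the two scans are illustrative, not a copy of the real implementation.

void SnapshotTaskManager::BackEndThreadFunc() {
    while (!isStop_.load()) {
        // Remove tasks from the working map whose snapshot has finished.
        ScanWorkingTask();
        // Move a waiting task into the working map only if no snapshot
        // for the same file is currently in flight.
        ScanWaitingTask();
        std::this_thread::sleep_for(
            std::chrono::milliseconds(snapshotTaskManagerScanIntervalMs_));
    }
}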
a/src/snapshotcloneserver/snapshotclone_service.h b/src/snapshotcloneserver/snapshotclone_service.h index 6ba1f34f48..9e7a7e4cc9 100644 --- a/src/snapshotcloneserver/snapshotclone_service.h +++ b/src/snapshotcloneserver/snapshotclone_service.h @@ -38,14 +38,14 @@ using ::google::protobuf::RpcController; using ::google::protobuf::Closure; /** - * @brief 快照转储rpc服务实现 + * @brief Snapshot dump RPC service implementation */ class SnapshotCloneServiceImpl : public SnapshotCloneService { public: /** - * @brief 构造函数 + * @brief constructor * - * @param manager 快照转储服务管理对象 + * @param manager snapshot dump service management object */ SnapshotCloneServiceImpl( std::shared_ptr snapshotManager, @@ -55,12 +55,12 @@ class SnapshotCloneServiceImpl : public SnapshotCloneService { virtual ~SnapshotCloneServiceImpl() {} /** - * @brief http服务默认方法 + * @brief HTTP service default method * * @param cntl rpc controller - * @param req http请求报文 - * @param resp http回复报文 - * @param done http异步回调闭包 + * @param req HTTP request message + * @param resp HTTP reply message + * @param done HTTP asynchronous callback closure */ void default_method(RpcController* cntl, const HttpRequest* req, @@ -104,7 +104,7 @@ class SnapshotCloneServiceImpl : public SnapshotCloneService { const std::string &uuid = ""); private: - // 快照转储服务管理对象 + // Snapshot dump service management object std::shared_ptr snapshotManager_; std::shared_ptr cloneManager_; }; diff --git a/src/tools/chunkserver_client.cpp b/src/tools/chunkserver_client.cpp index 69eb492d5c..5ca8eb1ed8 100644 --- a/src/tools/chunkserver_client.cpp +++ b/src/tools/chunkserver_client.cpp @@ -69,7 +69,7 @@ int ChunkServerClient::GetRaftStatus(butil::IOBuf* iobuf) { } retryTimes++; } - // 只打最后一次失败的原因 + // Only print the reason for the last failure std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -122,7 +122,7 @@ int ChunkServerClient::GetCopysetStatus( return 0; } } - // 只打最后一次失败的原因 + // Only print the reason for the last failure std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -159,7 +159,7 @@ int ChunkServerClient::GetChunkHash(const Chunk& chunk, return 0; } } - // 只打最后一次失败的原因 + // Only print the reason for the last failure std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h index 5945737ae8..1401ac656a 100644 --- a/src/tools/chunkserver_client.h +++ b/src/tools/chunkserver_client.h @@ -58,39 +58,39 @@ class ChunkServerClient { public: virtual ~ChunkServerClient() = default; /** - * @brief 初始化channel,对一个地址,初始化一次就好 - * @param csAddr chunkserver地址 - * @return 成功返回0,失败返回-1 + * @brief Initialize the channel. 
For an address, just initialize it once + * @param csAddr chunkserver address + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& csAddr); /** - * @brief 调用braft的RaftStat接口获取复制组的详细信息,放到iobuf里面 - * @param iobuf 复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief: Call the RaftStat interface of Braft to obtain detailed information about the replication group, and place it in iobuf + * @param iobuf replication group details, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetRaftStatus(butil::IOBuf* iobuf); /** - * @brief 检查chunkserver是否在线,只检查controller,不检查response - * @return 在线返回true,不在线返回false + * @brief: Check if the chunkserver is online, only check the controller, not the response + * @return returns true online and false offline */ virtual bool CheckChunkServerOnline(); /** - * @brief 调用chunkserver的GetCopysetStatus接口 - & @param request 查询copyset的request - * @param response 返回的response,里面有复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief calls the GetCopysetStatus interface of chunkserver + & @param request Query the request for the copyset + * @param response The response returned contains detailed information about the replication group, which is valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetCopysetStatus(const CopysetStatusRequest& request, CopysetStatusResponse* response); /** - * @brief 从chunkserver获取chunk的hash值 - & @param chunk 要查询的chunk - * @param[out] chunkHash chunk的hash值,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the hash value of chunks from chunkserver + & @param chunk The chunk to query + * @param[out] The hash value chunkHash chunk, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetChunkHash(const Chunk& chunk, std::string* chunkHash); diff --git a/src/tools/chunkserver_tool_factory.h b/src/tools/chunkserver_tool_factory.h index 9a1e344b3c..b956ad3427 100644 --- a/src/tools/chunkserver_tool_factory.h +++ b/src/tools/chunkserver_tool_factory.h @@ -38,20 +38,20 @@ using curve::fs::Ext4FileSystemImpl; class ChunkServerToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command The name of the command to be executed + * @return CurveTool instance */ static std::shared_ptr GenerateChunkServerTool( const std::string& command); private: /** - * @brief 获取CurveMetaTool实例 + * @brief Get CurveMetaTool instance */ static std::shared_ptr GenerateCurveMetaTool(); /** - * @brief 获取RaftLogMetaTool实例 + * @brief Get RaftLogMetaTool instance */ static std::shared_ptr GenerateRaftLogTool(); }; diff --git a/src/tools/common.cpp b/src/tools/common.cpp index 35f29bf738..cdcdc369ba 100644 --- a/src/tools/common.cpp +++ b/src/tools/common.cpp @@ -29,11 +29,11 @@ namespace curve { namespace tool { void TrimMetricString(std::string* str) { - // 去掉头部空格 + // Remove header spaces str->erase(0, str->find_first_not_of(" ")); - // 去掉尾部回车 + // Remove the rear carriage return str->erase(str->find_last_not_of("\r\n") + 1); - // 去掉两边双引号 + // Remove double quotes from both sides str->erase(0, str->find_first_not_of("\"")); str->erase(str->find_last_not_of("\"") + 1); } diff --git a/src/tools/common.h b/src/tools/common.h index 1465a76ac7..143a4b3793 100644 --- a/src/tools/common.h +++ b/src/tools/common.h @@ -34,9 +34,9 @@ namespace curve { namespace tool { /** - * 
@brief 格式化,从metric获取的string - * 去掉string两边的双引号以及空格和回车 - * @param[out] str 要格式化的string + * @brief formatting, string obtained from metric + * Remove double quotes, spaces, and carriage returns around the string + * @param[out] str The string to format */ void TrimMetricString(std::string* str); diff --git a/src/tools/consistency_check.cpp b/src/tools/consistency_check.cpp index e3a84366ae..9356436008 100644 --- a/src/tools/consistency_check.cpp +++ b/src/tools/consistency_check.cpp @@ -25,11 +25,11 @@ #include "src/tools/consistency_check.h" DEFINE_string(filename, "", "filename to check consistency"); -DEFINE_bool(check_hash, true, R"(用户需要先确认copyset的applyindex一致之后 - 再去查copyset内容是不是一致。通常需要先设置 - check_hash = false先检查copyset的applyindex是否一致 - 如果一致了再设置check_hash = true, - 检查copyset内容是不是一致)"); +DEFINE_bool(check_hash, true, R"(Users need to confirm whether the apply index of the copyset is consistent + before checking if the copyset content is consistent. Usually, you should first set + check_hash = false to initially verify if the apply index of the copyset is consistent. + Once confirmed, then set check_hash = true, + to check if the copyset content is consistent)"); DEFINE_uint32(chunkServerBasePort, 8200, "base port of chunkserver"); DECLARE_string(mdsAddr); @@ -180,9 +180,9 @@ int ConsistencyCheck::CheckCopysetConsistency( std::string csAddr = hostIp + ":" + std::to_string(port); csAddrs.emplace_back(csAddr); } - // 检查当前copyset的chunkserver内容是否一致 + // Check if the chunkserver content of the current copyset is consistent if (checkHash) { - // 先检查apply index是否一致 + // First, check if the application index is consistent res = CheckApplyIndex(copyset, csAddrs); if (res != 0) { std::cout << "Apply index not match when check hash!" << std::endl; diff --git a/src/tools/consistency_check.h b/src/tools/consistency_check.h index 12e12346b9..be43e20335 100644 --- a/src/tools/consistency_check.h +++ b/src/tools/consistency_check.h @@ -61,111 +61,111 @@ class ConsistencyCheck : public CurveTool { ~ConsistencyCheck() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ void PrintHelp(const std::string &cmd) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string &cmd) override; /** - * @brief 检查三副本一致性 - * @param fileName 要检查一致性的文件名 - * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash - * @return 一致返回0,否则返回-1 + * @brief Check consistency of three replicas + * @param fileName The file name to check for consistency + * @param checkHash Does check hash? If false, check apply index instead of hash + * @return consistently returns 0, otherwise returns -1 */ int CheckFileConsistency(const std::string& fileName, bool checkHash); /** - * @brief 检查copyset的三副本一致性 - * @param copysetId 要检查的copysetId - * @param checkHash 是否检查hash,如果为false,检查apply index而不是hash - * @return 成功返回0,失败返回-1 + * @brief Check the consistency of the three copies of the copyset + * @param copysetId The copysetId to be checked + * @param checkHash Does check hash? 
If false, check apply index instead of hash + * @return returns 0 for success, -1 for failure */ int CheckCopysetConsistency(const CopySet copysetId, bool checkHash); /** - * @brief 打印帮助信息 + * @brief Print help information */ void PrintHelp(); /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 初始化 + * @brief initialization */ int Init(); /** - * @brief 从mds获取文件所在的copyset列表 - * @param fileName 文件名 - * @param[out] copysetIds copysetId的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of copysets where the file is located from mds + * @param fileName File name + * @param[out] copysetIds The list copysetIds is valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ int FetchFileCopyset(const std::string& fileName, std::set* copysets); /** - * @brief 从chunkserver获取copyset的状态 - * @param csAddr chunkserver地址 - * @param copysetId 要获取的copysetId - * @param[out] response 返回的response - * @return 成功返回0,失败返回-1 + * @brief Get the status of copyset from chunkserver + * @param csAddr chunkserver address + * @param copysetId The copysetId to obtain + * @param[out] response The response returned + * @return returns 0 for success, -1 for failure */ int GetCopysetStatusResponse(const std::string& csAddr, const CopySet copyset, CopysetStatusResponse* response); /** - * @brief 检查copyset中指定chunk的hash的一致性 - * @param copysetId 要检查的copysetId - * @param csAddrs copyset对应的chunkserver的地址 - * @return 一致返回0,否则返回-1 + * @brief Check the consistency of the hash of the specified chunk in the copyset + * @param copysetId The copysetId to be checked + * @param csAddrs The address of the chunkserver corresponding to the copyset + * @return consistently returns 0, otherwise returns -1 */ int CheckCopysetHash(const CopySet& copyset, const CsAddrsType& csAddrs); /** - * @brief chunk在三个副本的hash的一致性 - * @param chunk 要检查的chunk - * @param csAddrs copyset对应的chunkserver的地址 - * @return 一致返回0,否则返回-1 + * @brief Consistency of hash in three replicates of chunk + * @param chunk The chunk to be checked + * @param csAddrs The address of the chunkserver corresponding to the copyset + * @return consistently returns 0, otherwise returns -1 */ int CheckChunkHash(const Chunk& chunk, const CsAddrsType& csAddrs); /** - * @brief 检查副本间applyindex的一致性 - * @param copysetId 要检查的copysetId - * @param csAddrs copyset对应的chunkserver的地址 - * @return 一致返回0,否则返回-1 + * @brief Check the consistency of the applyindex between replicas + * @param copysetId The copysetId to be checked + * @param csAddrs The address of the chunkserver corresponding to the copyset + * @return consistently returns 0, otherwise returns -1 */ int CheckApplyIndex(const CopySet copyset, const CsAddrsType& csAddrs); private: - // 文件所在的逻辑池id + // The logical pool ID where the file is located PoolIdType lpid_; - // 用来与mds的nameservice接口交互 + // Used to interact with the nameservice interface of mds std::shared_ptr nameSpaceToolCore_; - // 向chunkserver发送RPC的client + // Client sending RPC to chunkserver std::shared_ptr csClient_; - // copyset中需要检查hash的chunk + // The chunk of the hash needs to be checked in the copyset std::map> chunksInCopyset_; - // 是否初始化成功过 + // Has initialization been successful bool inited_; }; } // namespace tool diff --git a/src/tools/copyset_check.cpp b/src/tools/copyset_check.cpp index 06341e5291..f9708d5a32 100644 --- 
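The check_hash flag text above implies a two-phase workflow. Expressed against the ConsistencyCheck interface documented in consistency_check.h, it would look roughly like this (illustrative only; the wrapper function is hypothetical and "check" is assumed to be an initialized ConsistencyCheck instance):

int CheckFileInTwoPhases(ConsistencyCheck* check, const std::string& fileName) {
    // Phase 1 (check_hash = false): verify that the apply indices of the
    // three replicas have converged.
    if (check->CheckFileConsistency(fileName, false) != 0) {
        return -1;
    }
    // Phase 2 (check_hash = true): only then compare the chunk hashes.
    return check->CheckFileConsistency(fileName, true);
}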
a/src/tools/copyset_check.cpp +++ b/src/tools/copyset_check.cpp @@ -76,7 +76,7 @@ int CopysetCheck::RunCommand(const std::string& command) { return -1; } if (command == kCheckCopysetCmd) { - // 检查某个copyset的状态 + // Check the status of a copyset if (FLAGS_logicalPoolId == 0 || FLAGS_copysetId == 0) { std::cout << "logicalPoolId AND copysetId should be specified!" << std::endl; @@ -84,7 +84,7 @@ int CopysetCheck::RunCommand(const std::string& command) { } return CheckCopyset(); } else if (command == kCheckChunnkServerCmd) { - // 检查某个chunkserver上的所有copyset + // Check all copysets on a certain chunkserver CHECK_ONLY_ONE_SHOULD_BE_SPECIFIED(chunkserverAddr, chunkserverId); return CheckChunkServer(); } else if (command == kCheckServerCmd) { @@ -282,7 +282,7 @@ void CopysetCheck::PrintDetail() { PrintCopySet(item.second); } std::cout << std::endl; - // 打印有问题的chunkserver + //Printing problematic chunkservers PrintExcepChunkservers(); } diff --git a/src/tools/copyset_check.h b/src/tools/copyset_check.h index b4fa76c28f..a0be312e6e 100644 --- a/src/tools/copyset_check.h +++ b/src/tools/copyset_check.h @@ -54,89 +54,89 @@ class CopysetCheck : public CurveTool { ~CopysetCheck() = default; /** - * @brief 根据flag检查复制组健康状态 - * 复制组健康的标准,没有任何副本处于以下状态,下面的顺序按优先级排序, - * 即满足上面一条,就不会检查下面一条 - * 1、leader为空(复制组的信息以leader处的为准,没有leader无法检查) - * 2、配置中的副本数量不足 - * 3、有副本不在线 - * 4、有副本在安装快照 - * 5、副本间log index差距太大 - * 6、对于集群来说,还要判断一下chunkserver上的copyset数量和leader数量是否均衡, - * 避免后续会有调度使得集群不稳定 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 - * @return 成功返回0,失败返回-1 + * @brief Check the health status of the replication group based on the flag + * The standard for replication group health is that no replica is in the following state. The following order is sorted by priority, + * If the above one is met, the following one will not be checked + * 1. The leader is empty (the information of the replication group is based on the leader, and cannot be checked without a leader) + * 2. Insufficient number of replicas in the configuration + * 3. Some replicas are not online + * 4. There is a replica in the installation snapshot + * 5. The log index difference between replicas is too large + * 6. 
For a cluster, it is also necessary to determine whether the number of copysets and the number of leaders on the chunkserver are balanced, + * Avoid scheduling that may cause instability in the cluster in the future + * @param command The command to be executed by currently includescheck-copyset, check-chunkserver, + * check-server, check-cluster, etc + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 - * @param command 要执行的命令,目前有check-copyset,check-chunkserver, - * check-server,check-cluster等 + * @brief Print help information + * @param command The command to be executed by currently includescheck-copyset, check-chunkserver, + * check-server, check-cluster, etc */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true / false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 初始化 + * @brief initialization */ int Init(); /** - * @brief 检查单个copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check a single copyset + * @return Health returns 0, otherwise returns -1 */ int CheckCopyset(); /** - * @brief 检查chunkserver上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on chunkserver + * @return Health returns 0, otherwise returns -1 */ int CheckChunkServer(); /** - * @brief 检查server上所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets on the server + * @return Health returns 0, otherwise returns -1 */ int CheckServer(); /** - * @brief 检查集群所有copyset - * @return 健康返回0,其他情况返回-1 + * @brief Check all copysets in the cluster + * @return Health returns 0, otherwise returns -1 */ int CheckCopysetsInCluster(); /** - * @brief 检查mds端的operator - * @return 无operator返回0,其他情况返回-1 + * @brief Check the operator on the mds side + * @return returns 0 without an operator, otherwise returns -1 */ int CheckOperator(const std::string& opName); - // 打印copyset检查的详细结果 + // Print detailed results of copyset check void PrintDetail(); void PrintCopySet(const std::set& set); - // 打印检查的结果,一共多少copyset,有多少不健康 + // Print the results of the inspection, how many copies are there in total, and how many are unhealthy void PrintStatistic(); - // 打印有问题的chunkserver列表 + // Print a list of problematic chunkservers void PrintExcepChunkservers(); - // 打印大多数不在线的副本上面的卷 + // Print the volume on most offline copies int PrintMayBrokenVolumes(); private: - // 检查copyset的核心逻辑 + // Check the core logic of copyset std::shared_ptr core_; - // 是否初始化成功过 + // Has initialization been successful bool inited_; }; } // namespace tool diff --git a/src/tools/copyset_check_core.cpp b/src/tools/copyset_check_core.cpp index f32a7a923d..980d931ab8 100644 --- a/src/tools/copyset_check_core.cpp +++ b/src/tools/copyset_check_core.cpp @@ -69,7 +69,7 @@ CheckResult CopysetCheckCore::CheckOneCopyset(const PoolIdType& logicalPoolId, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; ++offlinePeers; @@ -131,7 +131,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::cout << "GetChunkServerInfo from mds fail!" 
<< std::endl; return -1; } - // 如果chunkserver retired的话不发送请求 + // If chunkserver is redirected, do not send the request if (csInfo.status() == ChunkServerStatus::RETIRED) { std::cout << "ChunkServer is retired!" << std::endl; return 0; @@ -139,7 +139,7 @@ int CopysetCheckCore::CheckCopysetsOnChunkServer( std::string hostIp = csInfo.hostip(); uint64_t port = csInfo.port(); std::string csAddr = hostIp + ":" + std::to_string(port); - // 向chunkserver发送RPC请求获取raft state + // Send RPC request to chunkserver to obtain raft state ChunkServerHealthStatus csStatus = CheckCopysetsOnChunkServer(csAddr, {}); if (csStatus == ChunkServerHealthStatus::kHealthy) { return 0; @@ -165,33 +165,33 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( } if (res != 0) { - // 如果查询chunkserver失败,认为不在线,把它上面所有的 - // copyset都添加到peerNotOnlineCopysets_里面 + // If querying the chunkserver fails, consider it offline and add all its + // copysets to the peerNotOnlineCopysets_. UpdatePeerNotOnlineCopysets(chunkserverAddr); serviceExceptionChunkServers_.emplace(chunkserverAddr); chunkserverCopysets_[chunkserverAddr] = {}; return ChunkServerHealthStatus::kNotOnline; } - // 存储每一个copyset的详细信息 + // Store detailed information for each copyset CopySetInfosType copysetInfos; ParseResponseAttachment(groupIds, &iobuf, ©setInfos); - // 只有查询全部chunkserver的时候才更新chunkServer上的copyset列表 + // Only update the copyset list on chunkServer when querying all chunkservers if (groupIds.empty()) { UpdateChunkServerCopysets(chunkserverAddr, copysetInfos); } - // 对应的chunkserver上没有要找的leader的copyset,可能已经迁移出去了, - // 但是follower这边还没更新,这种情况也认为chunkserver不健康 + // There is no copyset for the leader you are looking for on the corresponding chunkserver, it may have already been migrated, + // But the follower has not been updated yet, and this situation also suggests that chunkserver is unhealthy if (copysetInfos.empty() || (!groupIds.empty() && copysetInfos.size() != groupIds.size())) { std::cout << "Some copysets not found on chunkserver, may be tranfered" << std::endl; return ChunkServerHealthStatus::kNotHealthy; } - // 存储需要发送消息的chunkserver的地址和对应的groupId - // key是chunkserver地址,value是groupId的列表 + // Store the address and corresponding groupId of the chunkserver that needs to send messages + // Key is the chunkserver address, and value is a list of groupIds std::map> csAddrMap; - // 存储没有leader的copyset对应的peers,key为groupId,value为配置 + // Store the peers corresponding to the copyset without a leader, with key as groupId and value as configuration std::map> noLeaderCopysetsPeers; for (auto& copysetInfo : copysetInfos) { std::string groupId = copysetInfo[kGroupId]; @@ -228,8 +228,8 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( break; } } else if (state == kStateFollower) { - // 如果没有leader,检查是否是大多数不在线 - // 是的话标记为大多数不在线,否则标记为No leader + // If there is no leader, check if most are offline + // If yes, mark it as mostly offline, otherwise mark it as No leader if (copysetInfo.count(kLeader) == 0 || copysetInfo[kLeader] == kEmptyAddr) { std::vector peers; @@ -238,7 +238,7 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( continue; } if (queryLeader) { - // 向leader发送rpc请求 + // Send an rpc request to the leader auto pos = copysetInfo[kLeader].rfind(":"); auto csAddr = copysetInfo[kLeader].substr(0, pos); csAddrMap[csAddr].emplace(groupId); @@ -247,22 +247,22 @@ ChunkServerHealthStatus CopysetCheckCore::CheckCopysetsOnChunkServer( copysets_[kNoLeader].emplace(groupId); isHealthy = false; } else { - // 
其他情况有ERROR,UNINITIALIZED,SHUTTING和SHUTDOWN,这种都认为不健康,统计到 - // copyset里面 + // In other cases such as ERROR, UNINITIALIZED, SHUTTING, and SHUTDOWN, + // they are considered unhealthy and are counted within the copyset. std::string key = "state " + copysetInfo[kState]; copysets_[key].emplace(groupId); isHealthy = false; } } - // 遍历没有leader的copyset + // Traverse copysets without leaders bool health = CheckCopysetsNoLeader(chunkserverAddr, noLeaderCopysetsPeers); if (!health) { isHealthy = false; } - // 遍历chunkserver发送请求 + // Traverse chunkserver to send requests for (const auto& item : csAddrMap) { ChunkServerHealthStatus res = CheckCopysetsOnChunkServer(item.first, item.second); @@ -296,7 +296,7 @@ bool CopysetCheckCore::CheckCopysetsNoLeader(const std::string& csAddr, return false; } for (const auto& item : result) { - // 如果在配置组中,检查是否是majority offline + // If in the configuration group, check if it is a majority offline if (item.second) { isHealthy = false; std::string groupId = item.first; @@ -390,7 +390,7 @@ int CopysetCheckCore::CheckCopysetsOnServer(const ServerIdType& serverId, const std::string& serverIp, bool queryLeader, std::vector* unhealthyChunkServers) { bool isHealthy = true; - // 向mds发送RPC + // Send RPC to mds int res = 0; std::vector chunkservers; if (serverId > 0) { @@ -450,18 +450,18 @@ int CopysetCheckCore::CheckCopysetsInCluster() { isHealthy = false; } } - // 检查从chunkserver上获取的copyset数量与mds记录的数量是否一致 + // Check if the number of copysets obtained from chunkserver matches the number of mds records res = CheckCopysetsWithMds(); if (res != 0) { std::cout << "CheckCopysetNumWithMds fail!" << std::endl; return -1; } - // 如果不健康,直接返回,如果健康,还需要对operator作出判断 + // If not healthy, return directly. If healthy, make a judgment on the operator if (!isHealthy) { return -1; } - // 默认不检查operator,在测试脚本之类的要求比较严格的地方才检查operator,不然 - // 每次执行命令等待30秒很不方便 + // By default, operators are not checked, and only checked in areas with strict requirements such as test scripts, otherwise + // waiting for 30 seconds each time executing a command is inconvenient if (FLAGS_checkOperator) { int res = CheckOperator(kTotalOpName, FLAGS_operatorMaxPeriod); if (res != 0) { @@ -575,22 +575,21 @@ int CopysetCheckCore::CheckOperator(const std::string& opName, return 0; } -// 每个copyset的信息都会存储在一个map里面,map的key有 -// groupId: 复制组的groupId -// peer_id: 10.182.26.45:8210:0格式的peer id -// state: 节点的状态,LEADER,FOLLOWER,CANDIDATE等等 -// peers: 配置组里的成员,通过空格分隔 -// last_log_id: 最后一个log entry的index -// leader: state为LEADER时才存在这个key,指向复制组leader +// Information for each copyset is stored in a map. The map's keys include: +// - groupId: The groupId of the replication group. +// - peer_id: The peer id in the format 10.182.26.45:8210:0. +// - state: The node's state, which can be LEADER, FOLLOWER, CANDIDATE, etc. +// - peers: Members in the configuration group, separated by spaces. +// - last_log_id: The index of the last log entry. +// - leader: This key exists only when the state is LEADER and points to the leader of the replication group. 
// -// replicator_1: 第一个follower的复制状态,value如下: -// next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 ic=0 -// next_index为下一个要发送给该follower的index -// flying_append_entries_size是发出去还未确认的entry的数量 -// idle表明没有在安装快照,如果在安装快照的话是installing snapshot {12, 3}, -// 1234和3分别是快照包含的最后一个log entry的index和term -// hc,ac,ic分别是发向follower的heartbeat,append entry, -// 和install snapshot的rpc的数量 +// replicator_1: The replication status of the first follower, with values as follows: +// next_index=6349842 flying_append_entries_size=0 idle hc=1234 ac=123 ic=0 +// - next_index: The next index to be sent to this follower. +// - flying_append_entries_size: The number of unconfirmed entries that have been sent. +// - idle: Indicates whether there is no snapshot installation. If a snapshot is being installed, it will show as "installing snapshot {12, 3}", +// where 1234 and 3 are the last log entry's index and term included in the snapshot. +// - hc, ac, ic: The counts of RPCs sent to the follower for heartbeat, append entry, and install snapshot, respectively. void CopysetCheckCore::ParseResponseAttachment( const std::set& gIds, butil::IOBuf* iobuf, @@ -629,7 +628,7 @@ void CopysetCheckCore::ParseResponseAttachment( continue; } } - // 找到了copyset + // Found copyset auto pos = line.npos; if (line.find(kReplicator) != line.npos) { pos = line.rfind(":"); @@ -640,7 +639,7 @@ void CopysetCheckCore::ParseResponseAttachment( continue; } std::string key = line.substr(0, pos); - // 如果是replicator,把key简化一下 + // If it's a replicator, simplify the key if (key.find(kReplicator) != key.npos) { key = kReplicator + std::to_string(i); ++i; @@ -682,7 +681,7 @@ void CopysetCheckCore::UpdateChunkServerCopysets( chunkserverCopysets_[csAddr] = copysetIds; } -// 通过发送RPC检查chunkserver是否在线 +// Check if chunkserver is online by sending RPC bool CopysetCheckCore::CheckChunkServerOnline( const std::string& chunkserverAddr) { auto csClient = (csClient_ == nullptr) ? 
@@ -718,7 +717,7 @@ bool CopysetCheckCore::CheckCopySetOnline(const std::string& csAddr, butil::IOBuf iobuf; int res = QueryChunkServer(csAddr, &iobuf); if (res != 0) { - // 如果查询chunkserver失败,认为不在线 + // If the query for chunkserver fails, it is considered offline serviceExceptionChunkServers_.emplace(csAddr); chunkserverCopysets_[csAddr] = {}; return false; @@ -763,19 +762,19 @@ CheckResult CopysetCheckCore::CheckPeerOnlineStatus( CheckResult CopysetCheckCore::CheckHealthOnLeader( std::map* map) { - // 先判断peers是否小于3 + // First, determine if the peers are less than 3 std::vector peers; curve::common::SplitString((*map)[kPeers], " ", &peers); if (peers.size() < FLAGS_replicasNum) { return CheckResult::kPeersNoSufficient; } std::string groupId = (*map)[kGroupId]; - // 检查不在线peer的数量 + // Check the number of offline peers CheckResult checkRes = CheckPeerOnlineStatus(groupId, peers); if (checkRes != CheckResult::kHealthy) { return checkRes; } - // 根据replicator的情况判断log index之间的差距 + // Judging the gap between log indices based on the replicator's situation uint64_t lastLogId; std::string str = (*map)[kStorage]; auto pos1 = str.find("="); @@ -849,7 +848,7 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { copysetIds.emplace_back(csInfo.copysetid()); } - // 获取每个copyset的成员 + // Get the members of each copyset std::vector csServerInfos; res = mdsClient_->GetChunkServerListInCopySets(logicalPoolId, copysetIds, @@ -858,7 +857,7 @@ void CopysetCheckCore::UpdatePeerNotOnlineCopysets(const std::string& csAddr) { std::cout << "GetChunkServerListInCopySets fail" << std::endl; return; } - // 遍历每个copyset + // Traverse each copyset for (const auto& info : csServerInfos) { std::vector peers; for (const auto& csLoc : info.cslocs()) { @@ -889,7 +888,7 @@ CopysetStatistics CopysetCheckCore::GetCopysetStatistics() { if (item.first == kTotal) { total = item.second.size(); } else { - // 求并集 + // Union unhealthyCopysets.insert(item.second.begin(), item.second.end()); } diff --git a/src/tools/copyset_check_core.h b/src/tools/copyset_check_core.h index 6e93a373c7..fcbfb710e9 100644 --- a/src/tools/copyset_check_core.h +++ b/src/tools/copyset_check_core.h @@ -65,27 +65,27 @@ using CopySet = std::pair; using CopySetInfosType = std::vector>; enum class CheckResult { - // copyset健康 + // copyset Health kHealthy = 0, - // 解析结果失败 + // Parsing result failed kParseError = -1, - // peer数量小于预期 + // The number of peers is less than expected kPeersNoSufficient = -2, - // 副本间的index差距太大 + // The index difference between replicas is too large kLogIndexGapTooBig = -3, - // 有副本在安装快照 + // There is a replica installing the snapshot kInstallingSnapshot = -4, - // 少数副本不在线 + // A few instances are not online kMinorityPeerNotOnline = -5, - // 大多数副本不在线 + // Most replicas are not online kMajorityPeerNotOnline = -6, kOtherErr = -7 }; enum class ChunkServerHealthStatus { - kHealthy = 0, // chunkserver上所有copyset健康 - kNotHealthy = -1, // chunkserver上有copyset不健康 - kNotOnline = -2 // chunkserver不在线 + kHealthy = 0, // All copysets on chunkserver are healthy + kNotHealthy = -1, // Copyset on chunkserver is unhealthy + kNotOnline = -2 // Chunkserver is not online }; struct CopysetStatistics { @@ -114,9 +114,9 @@ class CopysetCheckCore { virtual ~CopysetCheckCore() = default; /** - * @brief 初始化mds client - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @param mdsAddr Address of mds, supporting multiple addresses separated by ',' + * @return returns 0 for success, -1 for 
failure */ virtual int Init(const std::string& mdsAddr); @@ -132,21 +132,21 @@ class CopysetCheckCore { const CopySetIdType& copysetId); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 + * @brief Check the health status of all copysets on a certain chunkserver * * @param chunkserId chunkserverId * - * @return 健康返回0,不健康返回-1 + * @return Health returns 0, unhealthy returns -1 */ virtual int CheckCopysetsOnChunkServer( const ChunkServerIdType& chunkserverId); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 + * @brief Check the health status of all copysets on a certain chunkserver * - * @param chunkserAddr chunkserver地址 + * @param chunkserAddr chunkserver address * - * @return 健康返回0,不健康返回-1 + * @return Health returns 0, unhealthy returns -1 */ virtual int CheckCopysetsOnChunkServer(const std::string& chunkserverAddr); @@ -156,52 +156,52 @@ class CopysetCheckCore { virtual int CheckCopysetsOnOfflineChunkServer(); /** - * @brief 检查某个server上的所有copyset的健康状态 + * @brief Check the health status of all copysets on a server * - * @param serverId server的id - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 + * @param serverId Server ID + * @param[out] unhealthyChunkServers optional parameter, a list of unhealthy chunkservers with copyset on the server * - * @return 健康返回0,不健康返回-1 + * @return Health returns 0, unhealthy returns -1 */ virtual int CheckCopysetsOnServer(const ServerIdType& serverId, std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查某个server上的所有copyset的健康状态 + * @brief Check the health status of all copysets on a server * - * @param serverId server的ip - * @param[out] unhealthyChunkServers 可选参数,server上copyset不健康的chunkserver的列表 + * @param serverId IP of server + * @param[out] unhealthyChunkServers optional parameter, a list of unhealthy chunkservers with copyset on the server * - * @return 健康返回0,不健康返回-1 + * @return Health returns 0, unhealthy returns -1 */ virtual int CheckCopysetsOnServer(const std::string& serverIp, std::vector* unhealthyChunkServers = nullptr); /** - * @brief 检查集群中所有copyset的健康状态 + * @brief Check the health status of all copysets in the cluster * - * @return 健康返回0,不健康返回-1 + * @return Health returns 0, unhealthy returns -1 */ virtual int CheckCopysetsInCluster(); /** - * @brief 检查集群中的operator - * @param opName operator的名字 - * @param checkTimeSec 检查时间 - * @return 检查正常返回0,检查失败或存在operator返回-1 + * @brief Check the operators in the cluster + * @param opName The name of the operator + * @param checkTimeSec check time + * @return returns 0 if the check is normal, or -1 if the check fails or there is an operator present */ virtual int CheckOperator(const std::string& opName, uint64_t checkTimeSec); /** - * @brief 计算不健康的copyset的比例,检查后调用 - * @return 不健康的copyset的比例 + * @brief Calculate the proportion of unhealthy copysets, check and call + * @return The proportion of unhealthy copysets */ virtual CopysetStatistics GetCopysetStatistics(); /** - * @brief 获取copyset的列表,通常检查后会调用,然后打印出来 - * @return copyset的列表 + * @brief to obtain a list of copysets, usually called after checking, and then printed out + * @return List of copysets */ virtual const std::map>& GetCopysetsRes() const { @@ -215,16 +215,16 @@ class CopysetCheckCore { std::vector* copysets); /** - * @brief 获取copyset的详细信息 - * @return copyset的详细信息 + * @brief Get detailed information about copyset + * @return Details of copyset */ virtual const std::string& GetCopysetDetail() const { return copysetsDetail_; } /** - * @brief 获取检查过程中服务异常的chunkserver列表,通常检查后会调用,然后打印出来 - * @return 服务异常的chunkserver的列表 + * 
@brief Get the list of chunkservers that had service exceptions during the check, usually called after a check so the result can be printed + * @return List of chunkservers with service exceptions */ virtual const std::set& GetServiceExceptionChunkServer() const { @@ -232,8 +232,8 @@ class CopysetCheckCore { } /** - * @brief 获取检查过程中copyset寻找失败的chunkserver列表,通常检查后会调用,然后打印出来 - * @return copyset加载异常的chunkserver的列表 + * @brief Get the list of chunkservers on which copysets failed to be found during the check, usually called after a check so the result can be printed + * @return List of chunkservers with copyset loading exceptions */ virtual const std::set& GetCopysetLoadExceptionChunkServer() const { @@ -241,11 +241,11 @@ class CopysetCheckCore { } /** - * @brief 通过发送RPC检查chunkserver是否在线 + * @brief Check whether the chunkserver is online by sending an RPC * - * @param chunkserverAddr chunkserver的地址 + * @param chunkserverAddr Address of the chunkserver * - * @return 在线返回true,不在线返回false + * @return Returns true if online, false if offline */ virtual bool CheckChunkServerOnline(const std::string& chunkserverAddr); @@ -260,13 +260,13 @@ class CopysetCheckCore { private: /** - * @brief 从iobuf分析出指定groupId的复制组的信息, - * 每个复制组的信息都放到一个map里面 + * @brief Parse the information of the replication groups with the specified groupIds from the iobuf; + * each replication group's information is placed in a map * - * @param gIds 要查询的复制组的groupId,为空的话全部查询 - * @param iobuf 要分析的iobuf - * @param[out] maps copyset信息的列表,每个copyset的信息都是一个map - * @param saveIobufStr 是否要把iobuf里的详细内容存下来 + * @param gIds groupIds of the replication groups to query; if empty, all are queried + * @param iobuf The iobuf to parse + * @param[out] maps A list of copyset information, where each copyset's information is a map + * @param saveIobufStr Whether to save the detailed content of the iobuf * */ void ParseResponseAttachment(const std::set& gIds, @@ -275,12 +275,12 @@ class CopysetCheckCore { bool saveIobufStr = false); /** - * @brief 检查某个chunkserver上的所有copyset的健康状态 + * @brief Check the health status of all copysets on a given chunkserver * * @param chunkserId chunkserverId - * @param chunkserverAddr chunkserver的地址,两者指定一个就好 + * @param chunkserverAddr Address of the chunkserver; only one of the two needs to be specified * - * @return 健康返回0,不健康返回-1 + * @return Returns 0 if healthy, -1 if unhealthy */ int CheckCopysetsOnChunkServer(const ChunkServerIdType& chunkserverId, const std::string& chunkserverAddr); @@ -305,14 +305,14 @@ class CopysetCheckCore { bool queryCs = true); /** - * @brief 检查某个server上的所有copyset的健康状态 + * @brief Check the health status of all copysets on a server * - * @param serverId server的id - * @param serverIp server的ip,serverId或serverIp指定一个就好 - * @param queryLeader 是否向leader所在的server发送RPC查询, - * 对于检查cluster来说,所有server都会遍历到,不用查询 + * @param serverId Server ID + * @param serverIp IP of the server; only one of serverId and serverIp needs to be specified + * @param queryLeader Whether to send an RPC query to the server where the leader is located; + * when checking the whole cluster, all servers are traversed anyway, so no extra query is needed * - * @return 健康返回0,不健康返回-1 + * @return Returns 0 if healthy, -1 if unhealthy */ int CheckCopysetsOnServer(const ServerIdType& serverId, const std::string& serverIp, @@ -331,55 +331,55 @@ class CopysetCheckCore { std::map> *result); /** - * @brief 根据leader的map里面的copyset信息分析出copyset是否健康,健康返回0,否则 - * 否则返回错误码 + * @brief Analyze whether the copyset is healthy based on the copyset information in the leader's map; returns 0 if healthy, + * otherwise returns an error code * - * @param map leader的copyset信息,以键值对的方式存储 + * @param map The leader's copyset information, stored as key-value pairs * - * @return 返回错误码 + * @return Returns the error code */ CheckResult CheckHealthOnLeader(std::map* map); /** - * @brief 向chunkserver发起raft state rpc + * @brief Send a raft state RPC to the chunkserver * - * @param chunkserverAddr chunkserver的地址 - * @param[out] iobuf 返回的responseattachment,返回0的时候有效 + * @param chunkserverAddr Address of the chunkserver + * @param[out] iobuf The returned response attachment, valid when 0 is returned * - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ int QueryChunkServer(const std::string& chunkserverAddr, butil::IOBuf* iobuf); /** - * @brief 把chunkserver上所有的copyset更新到peerNotOnline里面 + * @brief Update all copysets on the chunkserver into peerNotOnline * - * @param csAddr chunkserver的地址 + * @param csAddr Address of the chunkserver * - * @return 无 + * @return None */ void UpdatePeerNotOnlineCopysets(const std::string& csAddr); /** - * @brief 以mds中的copyset配置组为参照,检查chunkserver是否在copyset的配置组中 + * @brief Check whether the chunkserver is in the copyset's configuration group, using the copyset configuration group in mds as the reference * - * @param csAddr chunkserver的地址 - * @param copysets copyset列表 - * @param[out] result 检查结果,copyset到存在与否的映射 + * @param csAddr Address of the chunkserver + * @param copysets copyset list + * @param[out] result Check result, a mapping from copyset to presence or absence * - * @return 包含返回true,否则返回false + * @return Returns true if contained, false otherwise */ int CheckIfChunkServerInCopysets(const std::string& csAddr, const std::set copysets, std::map* result); /** - * @brief 检查没有leader的copyset是否健康 + * @brief Check whether a copyset without a leader is healthy * - * @param csAddr chunkserver 地址 - * @param copysetsPeers copyset的groupId到peers的映射 + * @param csAddr chunkserver address + * @param copysetsPeers Mapping from copyset groupId to peers * - * @return 健康返回true,不健康返回false + * @return Returns true if healthy, false if unhealthy */ bool CheckCopysetsNoLeader(const std::string& csAddr, const std::map& peers); /** - * @brief 更新chunkserver上的copyset的groupId列表 + * @brief Update the list of copyset groupIds on the chunkserver * - * @param csAddr chunkserver地址 - * @param copysetInfos copyset信息列表 + * @param csAddr chunkserver address + * @param copysetInfos copyset information list */ void UpdateChunkServerCopysets(const std::string& csAddr, const CopySetInfosType& copysetInfos); @@ -429,23 +429,23 @@ class CopysetCheckCore { int CheckScanStatus(const std::vector& copysetInfos); private: - // 向mds发送RPC的client + // Client for sending RPCs to mds std::shared_ptr mdsClient_; // for unittest mock csClient std::shared_ptr csClient_; - // 保存copyset的信息 + // Stores copyset information std::map> copysets_; - // 用来保存发送RPC失败的那些chunkserver + // Stores the chunkservers to which sending RPCs failed std::set serviceExceptionChunkServers_; - // 用来保存一些copyset加载有问题的chunkserver + // Stores the chunkservers that had problems loading some copysets std::set copysetLoacExceptionChunkServers_; - // 用来存放访问过的chunkserver上的copyset列表,避免重复RPC + // Stores the copyset lists of chunkservers already visited, to avoid duplicate RPCs std::map> chunkserverCopysets_; - // 查询单个copyset的时候,保存复制组的详细信息 + // When querying a single copyset, stores the detailed information of the replication group std::string copysetsDetail_; const std::string kEmptyAddr = "0.0.0.0:0:0"; diff --git a/src/tools/curve_cli.cpp b/src/tools/curve_cli.cpp index 
0dc5dcf46e..9893fd9e44 100644 --- a/src/tools/curve_cli.cpp +++ b/src/tools/curve_cli.cpp @@ -217,13 +217,13 @@ int CurveCli::ResetPeer() { } curve::common::Peer requestPeer; requestPeer.set_address(requestPeerId.to_string()); - // 目前reset peer只支持reset为1一个副本,不支持增加副本, - // 因为不能通过工具在chunkserver上创建copyset + // Currently, reset peer only supports resetting to 1 replica and does not support adding replicas, + // Because it is not possible to create a copyset on chunkserver through tools if (newConf.size() != 1) { std::cout << "New conf can only specify one peer!" << std::endl; return -1; } - // 新的配置必须包含发送RPC的peer + // The new configuration must include a peer that sends RPC if (*newConf.begin() != requestPeerId) { std::cout << "New conf must include the target peer!" << std::endl; return -1; diff --git a/src/tools/curve_cli.h b/src/tools/curve_cli.h index 24a4944cee..a27683d5bb 100644 --- a/src/tools/curve_cli.h +++ b/src/tools/curve_cli.h @@ -58,29 +58,29 @@ class CurveCli : public CurveTool { mdsClient_(mdsClient) {} /** - * @brief 初始化mds client - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @return returns 0 for success, -1 for failure */ int Init(); /** - * @brief 打印help信息 - * @param 无 - * @return 无 + * @brief Print help information + * @param None + * @return None */ void PrintHelp(const std::string &cmd) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string &cmd) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); @@ -97,47 +97,47 @@ class CurveCli : public CurveTool { const CopysetID& copysetId); /** - * @brief 删除peer - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief delete peer + * @param None + * @return returns 0 for success, -1 for failure */ int RemovePeer(); /** - * @brief 转移leader - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief transfer leader + * @param None + * @return returns 0 for success, -1 for failure */ int TransferLeader(); /** - * @brief 触发打快照 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief trigger to take a snapshot + * @param None + * @return returns 0 for success, -1 for failure */ int DoSnapshot(); /** - * @brief 触发打快照 - * @param lgPoolId 逻辑池id - * @param copysetId 复制组id - * @param peer 复制组成员 - * @return 成功返回0,失败返回-1 + * @brief trigger to take a snapshot + * @param lgPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param peer replication group members + * @return returns 0 for success, -1 for failure */ int DoSnapshot(uint32_t lgPoolId, uint32_t copysetId, const curve::common::Peer& peer); /** - * @brief 给集群中全部copyset node触发打快照 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Trigger a snapshot of all copyset nodes in the cluster + * @param None + * @return returns 0 for success, -1 for failure */ int DoSnapshotAll(); /** - * @brief 重置配置组成员,目前只支持reset成一个成员 - * @param 无 - * @return 成功返回0,失败返回-1 + * @brief Reset configuration group members, currently only supports resetting to one member + * @param None + * @return returns 0 for success, -1 for failure */ int ResetPeer(); diff --git a/src/tools/curve_format_main.cpp b/src/tools/curve_format_main.cpp index 795813cdfd..6822bd2bdc 100644 --- a/src/tools/curve_format_main.cpp +++ b/src/tools/curve_format_main.cpp @@ -46,11 
+46,11 @@ using ::curve::common::align_up; using ::curve::common::is_aligned; /** - * chunkfile pool预分配工具,提供两种分配方式 - * 1. 以磁盘空间百分比方式,指定需要分配的百分比 - * 2. 指定以chunk数量分配 - * 默认的分配方式是以磁盘空间百分比作为分配方式,可以通过-allocateByPercent=false/true - * 调整分配方式。 + * chunkfile pool pre allocation tool, providing two allocation methods + * 1. Specify the percentage to be allocated as a percentage of disk space + * 2. Specify allocation by chunk quantity + * The default allocation method is based on the percentage of disk space, which can be achieved by -allocateByPercent=false/true + * Adjust the allocation method. */ DEFINE_bool(allocateByPercent, true, @@ -81,19 +81,19 @@ DEFINE_string(filePoolMetaPath, "./filePool.meta", "chunkfile pool meta info file path."); -// preallocateNum仅在测试的时候使用,测试提前预分配固定数量的chunk -// 当设置这个值的时候可以不用设置allocatepercent +// preallocateNum is only used during testing, and a fixed number of chunks are pre allocated in advance during testing +// When setting this value, there is no need to set allocatepercent DEFINE_uint32(preAllocateNum, 0, "preallocate chunk nums, this is JUST for curve test"); -// 在系统初始化的时候,管理员需要预先格式化磁盘,并进行预分配 -// 这时候只需要指定allocatepercent,allocatepercent是占整个盘的空间的百分比 +// During system initialization, the administrator needs to pre format the disk and pre allocate it +// At this point, only allocate percentage needs to be specified, which is the percentage of the entire disk space occupied by allocate percentage DEFINE_uint32(allocatePercent, 80, "preallocate storage percent of total disk"); -// 测试情况下置为false,加快测试速度 +// Set to false during testing to accelerate testing speed DEFINE_bool(needWriteZero, true, "not write zero for test."); @@ -210,7 +210,7 @@ static bool CanBitmapFitInMetaPage() { return bitmapBytes <= kMaximumBitmapBytes; } -// TODO(tongguangxun) :添加单元测试 +// TODO(tongguangxun): Adding unit tests int main(int argc, char** argv) { google::ParseCommandLineFlags(&argc, &argv, false); google::InitGoogleLogging(argv[0]); @@ -316,7 +316,7 @@ int main(int argc, char** argv) { return -1; } - // 读取meta文件,检查是否写入正确 + // Read the meta file and check if it is written correctly FilePoolMeta recordMeta; ret = curve::chunkserver::FilePoolHelper::DecodeMetaInfoFromMetaFile( fsptr, FLAGS_filePoolMetaPath, 4096, &recordMeta); diff --git a/src/tools/curve_meta_tool.cpp b/src/tools/curve_meta_tool.cpp index 5d9da78ec0..6c27f9d9da 100644 --- a/src/tools/curve_meta_tool.cpp +++ b/src/tools/curve_meta_tool.cpp @@ -108,7 +108,7 @@ int CurveMetaTool::RunCommand(const std::string& cmd) { int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { - // 打开chunk文件 + // Open chunk file int fd = localFS_->Open(chunkFileName.c_str(), O_RDONLY|O_NOATIME); if (fd < 0) { std::cout << "Fail to open " << chunkFileName << ", " @@ -116,7 +116,7 @@ int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { return -1; } - // 读取chunk头部 + // Read chunk header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); @@ -138,13 +138,13 @@ int CurveMetaTool::PrintChunkMeta(const std::string& chunkFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { - // 打开快照文件 + // Open snapshot file int fd = localFS_->Open(snapFileName.c_str(), O_RDONLY|O_NOATIME); if (fd < 0) { std::cout << "Fail to open " << snapFileName << ", " @@ -152,7 +152,7 @@ int CurveMetaTool::PrintSnapshotMeta(const std::string& 
snapFileName) { return -1; } - // 读取快照文件头部 + // Read snapshot file header std::unique_ptr buf(new char[FLAGS_pageSize]); memset(buf.get(), 0, FLAGS_pageSize); int rc = localFS_->Read(fd, buf.get(), 0, FLAGS_pageSize); @@ -174,7 +174,7 @@ int CurveMetaTool::PrintSnapshotMeta(const std::string& snapFileName) { return -1; } - // 打印metaPage + // Print MetaPage std::cout << metaPage; return 0; } diff --git a/src/tools/curve_meta_tool.h b/src/tools/curve_meta_tool.h index fe2b040c58..999f78f589 100644 --- a/src/tools/curve_meta_tool.h +++ b/src/tools/curve_meta_tool.h @@ -53,36 +53,36 @@ class CurveMetaTool : public CurveTool { localFS_(localFs) {} /** - * @brief 执行命令 - * @param command 要执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command The command to be executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 + * @brief Print help information */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 打印chunk文件元数据 - * @param chunkFileName chunk文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print chunk file metadata + * @param chunkFileName The file name of the chunk file + * @return successfully returns 0, otherwise returns -1 */ int PrintChunkMeta(const std::string& chunkFileName); /** - * @brief 打印快照文件元数据 - * @param snapFileName 快照文件的文件名 - * @return 成功返回0,否则返回-1 + * @brief Print snapshot file metadata + * @param snapFileName The file name of the snapshot file + * @return successfully returns 0, otherwise returns -1 */ int PrintSnapshotMeta(const std::string& snapFileName); diff --git a/src/tools/curve_tool_define.h b/src/tools/curve_tool_define.h index a392b807bd..76a34570d8 100644 --- a/src/tools/curve_tool_define.h +++ b/src/tools/curve_tool_define.h @@ -40,10 +40,10 @@ DECLARE_string(password); namespace curve { namespace tool { -// 显示版本命令 +// Display Version Command const char kVersionCmd[] = "version"; -// StatusTool相关命令 +// StatusTool related commands const char kStatusCmd[] = "status"; const char kSpaceCmd[] = "space"; const char kChunkserverStatusCmd[] = "chunkserver-status"; @@ -58,7 +58,7 @@ const char kSnapshotCloneStatusCmd[] = "snapshot-clone-status"; const char kClusterStatusCmd[] = "cluster-status"; const char kScanStatusCmd[] = "scan-status"; -// NameSpaceTool相关命令 +// NameSpaceTool related commands const char kGetCmd[] = "get"; const char kListCmd[] = "list"; const char kSegInfoCmd[] = "seginfo"; @@ -70,7 +70,7 @@ const char kChunkLocatitonCmd[] = "chunk-location"; const char kUpdateThrottle[] = "update-throttle"; const char kListPoolsets[] = "list-poolsets"; -// CopysetCheck相关命令 +// CopysetCheck related commands const char kCheckCopysetCmd[] = "check-copyset"; const char kCheckChunnkServerCmd[] = "check-chunkserver"; const char kCheckServerCmd[] = "check-server"; @@ -78,13 +78,13 @@ const char kCopysetsStatusCmd[] = "copysets-status"; const char kCheckOperatorCmd[] = "check-operator"; const char kListMayBrokenVolumes[] = "list-may-broken-vol"; -// CopysetTool相关命令 +// CopysetTool related commands const char kSetCopysetAvailFlag[] = "set-copyset-availflag"; -// 一致性检查命令 +// Consistency check command const char kCheckConsistencyCmd[] = "check-consistency"; -// 配置变更命令 +// Configuration change command const char 
kRemovePeerCmd[] = "remove-peer"; const char kTransferLeaderCmd[] = "transfer-leader"; const char kResetPeerCmd[] = "reset-peer"; @@ -95,18 +95,18 @@ const char kDoSnapshotAll[] = "do-snapshot-all"; const char kRapidLeaderSchedule[] = "rapid-leader-schedule"; const char kSetScanState[] = "set-scan-state"; -// curve文件meta相关的命令 +// Meta related commands for curve files const char kChunkMeta[] = "chunk-meta"; const char kSnapshotMeta[] = "snapshot-meta"; -// raft log相关命令 +// raft log related commands const char kRaftLogMeta[] = "raft-log-meta"; const char kOffline[] = "offline"; const char kVars[] = "/vars/"; const char kConfValue[] = "conf_value"; -// raft state 相关常量 +// raft state related constants const char kState[] = "state"; const char kStateLeader[] = "LEADER"; const char kStateFollower[] = "FOLLOWER"; diff --git a/src/tools/curve_tool_factory.h b/src/tools/curve_tool_factory.h index dc48778713..eb22008ba0 100644 --- a/src/tools/curve_tool_factory.h +++ b/src/tools/curve_tool_factory.h @@ -42,41 +42,41 @@ namespace tool { class CurveToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command The name of the command to be executed + * @return CurveTool instance */ static std::shared_ptr GenerateCurveTool( const std::string& command); private: /** - * @brief 获取StatusTool实例 + * @brief Get StatusTool instance */ static std::shared_ptr GenerateStatusTool(); /** - * @brief 获取NameSpaceTool实例 + * @brief Get NameSpaceTool instance */ static std::shared_ptr GenerateNameSpaceTool(); /** - * @brief 获取ConsistencyCheck实例 + * @brief Get ConsistencyCheck instance */ static std::shared_ptr GenerateConsistencyCheck(); /** - * @brief 获取CurveCli实例 + * @brief Get CurveCli instance */ static std::shared_ptr GenerateCurveCli(); /** - * @brief 获取CopysetCheck实例 + * @brief Get CopysetCheck instance */ static std::shared_ptr GenerateCopysetCheck(); /** - * @brief 获取ScheduleTool实例 + * @brief to obtain a ScheduleTool instance */ static std::shared_ptr GenerateScheduleTool(); diff --git a/src/tools/curve_tool_main.cpp b/src/tools/curve_tool_main.cpp index 8e516dc0e7..04cac913cf 100644 --- a/src/tools/curve_tool_main.cpp +++ b/src/tools/curve_tool_main.cpp @@ -80,8 +80,8 @@ extern std::string rootUserPassword; } // namespace curve void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // 如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置 - // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准 + // If the configuration file does not exist, no error will be reported, and the command line will prevail. 
This is to avoid strong dependence on the configuration + // If the configuration file exists and no command line is specified, the configuration file shall prevail google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("mdsAddr", &info) && info.is_default) { conf->GetStringValue("mdsAddr", &FLAGS_mdsAddr); @@ -168,7 +168,7 @@ int main(int argc, char** argv) { UpdateFlagsFromConf(&conf); - // 关掉健康检查,否则Not Connect to的时候重试没有意义 + // Turn off the health check, otherwise trying again when Not Connect to is meaningless brpc::FLAGS_health_check_interval = -1; auto curveTool = curve::tool::CurveToolFactory::GenerateCurveTool(command); if (!curveTool) { diff --git a/src/tools/etcd_client.h b/src/tools/etcd_client.h index b7d8f56964..08c51694e6 100644 --- a/src/tools/etcd_client.h +++ b/src/tools/etcd_client.h @@ -49,26 +49,26 @@ class EtcdClient { virtual ~EtcdClient() = default; /** - * @brief 初始化etcdAddrVec - * @param etcdAddr etcd的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize etcdAddrVec + * @param etcdAddr etcd addresses, supporting multiple addresses separated by ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& etcdAddr); /** - * @brief 获取etcd集群的leader - * @param[out] leaderAddrVec etcd的leader的地址列表,返回值为0时有效 - * @param[out] onlineState etcd集群中每个节点的在线状态,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the leader of the ETCD cluster + * @param[out] leaderAddrVec The address list of the leader for etcd, valid when the return value is 0 + * @param[out] onlineState etcd The online state of each node in the cluster, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetEtcdClusterStatus(std::vector* leaderAddrVec, std::map* onlineState); /** - * @brief 获取etcd的版本并检查版本一致性 - * @param[out] version 版本 - * @param[out] failedList 查询version失败的地址列表 - * @return 成功返回0,失败返回-1 + * @brief Get the version of ETCD and check version consistency + * @param[out] version Version + * @param[out] failedList Query address list for version failure + * @return returns 0 for success, -1 for failure */ virtual int GetAndCheckEtcdVersion(std::string* version, std::vector* failedList); diff --git a/src/tools/mds_client.cpp b/src/tools/mds_client.cpp index 22e807ea9e..f8ca7e024a 100644 --- a/src/tools/mds_client.cpp +++ b/src/tools/mds_client.cpp @@ -39,7 +39,7 @@ int MDSClient::Init(const std::string& mdsAddr, if (isInited_) { return 0; } - // 初始化channel + // Initialize channel curve::common::SplitString(mdsAddr, ",", &mdsAddrVec_); if (mdsAddrVec_.empty()) { std::cout << "Split mds address fail!" << std::endl; @@ -57,7 +57,7 @@ int MDSClient::Init(const std::string& mdsAddr, std::cout << "Init channel to " << mdsAddr << "fail!" << std::endl; continue; } - // 寻找哪个mds存活 + // Looking for which mds survived curve::mds::topology::ListPhysicalPoolRequest request; curve::mds::topology::ListPhysicalPoolResponse response; curve::mds::topology::TopologyService_Stub stub(&channel_); @@ -83,7 +83,7 @@ int MDSClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!" 
<< std::endl; return -1; } - // 只指定了一个端口,对所有mds采用这个端口 + // Only one port has been specified, and this port is used for all mds if (dummyPortVec.size() == 1) { for (uint64_t i = 0; i < mdsAddrVec_.size() - 1; ++i) { dummyPortVec.emplace_back(dummyPortVec[0]); @@ -606,7 +606,7 @@ int MDSClient::ListChunkServersOnServer(ListChunkServerRequest* request, response.statuscode() == kTopoErrCodeSuccess) { for (int i = 0; i < response.chunkserverinfos_size(); ++i) { const auto& chunkserver = response.chunkserverinfos(i); - // 跳过retired状态的chunkserver + // Skipping chunkserver in Retired State if (chunkserver.status() == ChunkServerStatus::RETIRED) { continue; } @@ -915,7 +915,7 @@ void MDSClient::GetMdsOnlineStatus(std::map* onlineStatus) { for (const auto &item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); - // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 + // If the obtained listening address does not match the recorded MDS address, it is also considered offline if (res != 0 || listenAddr != item.first) { onlineStatus->emplace(item.first, false); continue; @@ -973,7 +973,7 @@ bool MDSClient::ChangeMDServer() { std::vector MDSClient::GetCurrentMds() { std::vector leaderAddrs; for (const auto &item : dummyServerMap_) { - // 获取status来判断正在服务的地址 + // Obtain status to determine the address being served std::string status; MetricRet ret = metricClient_.GetMetric(item.second, kMdsStatusMetricName, &status); @@ -1105,7 +1105,7 @@ int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, cntl.set_timeout_ms(FLAGS_rpcTimeout); (obp->*func)(&cntl, request, response, nullptr); if (!cntl.Failed()) { - // 如果成功了,就返回0,对response的判断放到上一层 + //If successful, return 0 and place the response judgment on the previous level return 0; } bool needRetry = (cntl.ErrorCode() != EHOSTDOWN && @@ -1121,10 +1121,10 @@ int MDSClient::SendRpcToMds(Request* request, Response* response, T* obp, } return 0; } - // 对于需要重试的错误,重试次数用完了还没成功就返回错误不切换 - // ERPCTIMEDOUT比较特殊,这种情况下,mds可能切换了也可能没切换,所以 - // 需要重试并且重试次数用完后切换 - // 只有不需要重试的,也就是mds不在线的才会去切换mds + // For errors that require retries, if the retry limit is exhausted without success, return an error without switching. + // However, for ERPCTIMEDOUT, which is a special case, the MDS may have switched or may not have switched, + // so it needs to be retried, and if the retry limit is exhausted, then switch. + // Only for errors that do not require retries, meaning when the MDS is not online, will the MDS be switched. 
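[Editor's note] The retry policy described in the comment above can be condensed into the following self-contained sketch. It is illustrative only and not part of this patch: the names TryOneMds, SwitchMds and kMaxRetry, and the simplified error enum, are hypothetical stand-ins for the surrounding MDSClient members and brpc error codes.

// Illustrative sketch of the retry-then-switch policy (assumptions noted above).
#include <cstdint>

enum class RpcErr { kOk, kHostDown, kTimeout, kOther };

RpcErr TryOneMds() { return RpcErr::kOther; }  // stand-in for one RPC attempt to the current mds
bool SwitchMds() { return false; }             // stand-in for ChangeMDServer(); false when all mds failed

int SendRpcSketch() {
    const uint64_t kMaxRetry = 3;  // hypothetical retry budget
    uint64_t retry = 0;
    while (retry < kMaxRetry) {
        RpcErr err = TryOneMds();
        if (err == RpcErr::kOk) {
            return 0;  // success; the caller inspects the response code
        }
        if (err == RpcErr::kHostDown) {
            // mds not online: no point retrying it, switch to the next mds at once
            if (!SwitchMds()) return -1;
            retry = 0;  // start over against the new mds
            continue;
        }
        ++retry;
        if (err == RpcErr::kTimeout && retry == kMaxRetry) {
            // a timeout is ambiguous (the mds may or may not have switched),
            // so retry first and only switch once the budget is used up
            if (!SwitchMds()) return -1;
            retry = 0;
        }
        // other retryable errors: simply retry until the budget runs out
    }
    return -1;
}

int main() { return SendRpcSketch() == 0 ? 0 : 1; }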
if (needRetry && cntl.ErrorCode() != brpc::ERPCTIMEDOUT) { std::cout << "Send RPC to mds fail, error content: " << cntl.ErrorText() << std::endl; diff --git a/src/tools/mds_client.h b/src/tools/mds_client.h index 05bac69cd5..d54348dc74 100644 --- a/src/tools/mds_client.h +++ b/src/tools/mds_client.h @@ -79,10 +79,10 @@ namespace tool { using curve::mds::topology::PoolsetInfo; enum class GetSegmentRes { - kOK = 0, // 获取segment成功 - kSegmentNotAllocated = -1, // segment不存在 - kFileNotExists = -2, // 文件不存在 - kOtherError = -3 // 其他错误 + kOK = 0, // Successfully obtained segment + kSegmentNotAllocated = -1, // segment does not exist + kFileNotExists = -2, // File does not exist + kOtherError = -3 // Other errors }; using AllocMap = std::unordered_map; @@ -103,88 +103,88 @@ class MDSClient { virtual ~MDSClient() = default; /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize channel + * @param mdsAddr Address of mds, supporting multiple addresses separated by ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief 初始化channel - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @param dummyPort dummy port列表,只输入一个的话 - * 所有mds用同样的dummy port,用字符串分隔有多个的话 - * 为每个mds设置不同的dummy port - * @return 成功返回0,失败返回-1 + * @brief Initialize channel + * @param mdsAddr Address of mds, supporting multiple addresses separated by ',' + * @param dummyPort dummy port list, if only one is entered + * All mds use the same dummy port, separated by strings if there are multiple + * Set different dummy ports for each mds + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr, const std::string& dummyPort); /** - * @brief 获取文件fileInfo - * @param fileName 文件名 - * @param[out] fileInfo 文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get file fileInfo + * @param fileName File name + * @param[out] fileInfo file fileInfo, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo); /** - * @brief 获取文件或目录分配大小 - * @param fileName 文件名 - * @param[out] allocSize 文件或目录分配大小,返回值为0时有效 - * @param[out] allocMap 文件在各个池子分配的情况 - * @return 成功返回0,失败返回-1 + * @brief Get file or directory allocation size + * @param fileName File name + * @param[out] allocSize file or directory allocation size, valid when the return value is 0 + * @param[out] allocMap Allocation of files in various pools + * @return returns 0 for success, -1 for failure */ virtual int GetAllocatedSize(const std::string& fileName, uint64_t* allocSize, AllocMap* allocMap = nullptr); /** - * @brief 获取文件或目录的大小 - * @param fileName 文件名 - * @param[out] fileSize 文件或目录分配大小,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the size of a file or directory + * @param fileName File name + * @param[out] fileSize File or directory allocation size, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileSize(const std::string& fileName, uint64_t* fileSize); /** - * @brief 将目录下所有的fileInfo列出来 - * @param dirName 目录名 - * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all fileInfo in the directory + * @param dirName directory name + * @param[out] files All fileInfo files in the directory are valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListDir(const std::string& dirName, std::vector* files); /** - * @brief 
获取指定偏移的segment放到segment里面 - * @param fileName 文件名 - * @param offset 偏移值 - * @param[out] segment 文件中指定偏移的segmentInfo,返回值为0时有效 - * @return 返回GetSegmentRes,区分segment未分配和其他错误 + * @brief Get the segment with the specified offset and place it in the segment + * @param fileName File name + * @param offset offset value + * @param[out] segment The segmentInfo of the specified offset in the file is valid when the return value is 0 + * @return returns GetSegmentRes, distinguishing between unassigned segments and other errors */ virtual GetSegmentRes GetSegmentInfo(const std::string& fileName, uint64_t offset, PageFileSegment* segment); /** - * @brief 删除文件 - * @param fileName 文件名 - * @param forcedelete 是否强制删除 - * @return 成功返回0,失败返回-1 + * @brief Delete file + * @param fileName File name + * @param forcedelete: Do you want to force deletion + * @return returns 0 for success, -1 for failure */ virtual int DeleteFile(const std::string& fileName, bool forcedelete = false); /** - * @brief create pageFile or directory - * @param fileName file name or dir name - * @param length 文件长度 - * @param normalFile is file or dir - * @param stripeUnit stripe unit size - * @param stripeCount the amount of stripes - * @return 成功返回0,失败返回-1 + * @brief create pageFile or directory + * @param fileName file name or dir name + * @param length File length + * @param normalFile is file or dir + * @param stripeUnit stripe unit size + * @param stripeCount the amount of stripes + * @return returns 0 for success, -1 for failure */ virtual int CreateFile(const CreateFileContext& context); @@ -199,137 +199,137 @@ class MDSClient { std::vector* fileNames); /** - * @brief 扩容卷 - * @param fileName 文件名 - * @param newSize 扩容后的卷大小 - * @return 成功返回0,失败返回-1 + * @brief expansion volume + * @param fileName File name + * @param newSize The volume size after expansion + * @return returns 0 for success, -1 for failure */ virtual int ExtendVolume(const std::string& fileName, uint64_t newSize); /** - * @brief 列出client的dummyserver的地址 - * @param[out] clientAddrs client地址列表,返回0时有效 - * @param[out] listClientsInRepo 把数据库里的client也列出来 - * @return 成功返回0,失败返回-1 + * @brief List the address of the client's dummyserver + * @param[out] clientAddrs client address list, valid when 0 is returned + * @param[out] listClientsInRepo also lists the clients in the database + * @return returns 0 for success, -1 for failure */ virtual int ListClient(std::vector* clientAddrs, bool listClientsInRepo = false); /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetId copyset id - * @param[out] csLocs chunkserver位置的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool id + * @param copysetId copyset id + * @param[out] csLocs List of chunkserver locations, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, std::vector* csLocs); /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetIds 要查询的copysetId的列表 - * @param[out] csServerInfos copyset成员的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool ID + * @param copysetIds List of copysetIds to query + * @param[out] csServerInfos A list of copyset members, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int 
GetChunkServerListInCopySets(const PoolIdType& logicalPoolId, const std::vector& copysetIds, std::vector* csServerInfos); /** - * @brief 获取集群中的物理池列表 - * @param[out] pools 物理池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of physical pools in the cluster + * @param[out] pools A list of physical pool information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListPhysicalPoolsInCluster( std::vector* pools); /** - * @brief 获取物理池中的逻辑池列表 - * @param id 物理池id - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of logical pools in the physical pool + * @param id Physical pool id + * @param[out] pools List of logical pool information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListLogicalPoolsInPhysicalPool(const PoolIdType& id, std::vector* pools); /** - * @brief 集群中的逻辑池列表 - * @param[out] pools 逻辑池信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List of logical pools in the cluster + * @param[out] pools List of logical pool information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListLogicalPoolsInCluster(std::vector* pools); /** - * @brief 获取物理池中的zone列表 - * @param id 物理池id - * @param[out] zones zone信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of zones in the physical pool + * @param id Physical pool id + * @param[out] zones A list of zone information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListZoneInPhysicalPool(const PoolIdType& id, std::vector* zones); /** - * @brief 获取zone中的server列表 - * @param id zone id - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of servers in the zone + * @param id zone id + * @param[out] servers List of server information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListServersInZone(const ZoneIdType& id, std::vector* servers); /** - * @brief 获取server上的chunkserver的列表 - * @param id server id - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of chunkservers on the server + * @param id server id + * @param[out] chunkservers A list of chunkserver information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListChunkServersOnServer(const ServerIdType& id, std::vector* chunkservers); /** - * @brief 获取server上的chunkserver的列表 - * @param ip server ip - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of chunkservers on the server + * @param ip server ip + * @param[out] chunkservers A list of chunkserver information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListChunkServersOnServer(const std::string& ip, std::vector* chunkservers); /** - * @brief 获取chunkserver的详细信息 - * @param id chunkserver id - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get detailed information about the chunkserver + * @param id chunkserver id + * @param[out] chunkserver Detailed information of the chunkserver, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetChunkServerInfo(const ChunkServerIdType& id, ChunkServerInfo* chunkserver); /** - * @brief 获取chunkserver的详细信息 - * @param csAddr chunkserver的地址,ip:port的格式 - * 
@param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get detailed information about chunkserver + * @param csAddr The address of chunkserver, in the format of ip:port + * @param[out] chunkserver The detailed information of chunkserver is valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetChunkServerInfo(const std::string& csAddr, ChunkServerInfo* chunkserver); /** - * @brief 获取chunkserver上的所有copyset - * @param id chunkserver的id - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get all copysets on chunkserver + * @param id The id of chunkserver + * @param[out] copysets Details of copysets on chunkserver, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetCopySetsInChunkServer(const ChunkServerIdType& id, std::vector* copysets); /** - * @brief 获取chunkserver上的所有copyset - * @param csAddr chunkserver的地址,ip:port的格式 - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get all copysets on chunkserver + * @param csAddr The address of chunkserver, in the format of ip: port + * @param[out] copysets Details of copysets on chunkserver, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetCopySetsInChunkServer(const std::string& csAddr, std::vector* copysets); @@ -355,16 +355,16 @@ class MDSClient { CopysetInfo* copysetInfo); /** - * @brief 列出集群中的所有server - * @param[out] servers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all servers in the cluster + * @param[out] servers List of server information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListServersInCluster(std::vector* servers); /** - * @brief 列出集群中的所有chunkserver - * @param[out] chunkservers server信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all chunkservers in the cluster + * @param[out] chunkservers A list of server information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListChunkServersInCluster( std::vector* chunkservers); @@ -394,40 +394,40 @@ class MDSClient { virtual int ListUnAvailCopySets(std::vector* copysets); /** - * @brief 获取mds的某个metric的值 - * @param metricName metric的名字 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the value of a metric for mds + * @param metricName The name of the metric + * @param[out] value The value of metric is valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetMetric(const std::string& metricName, uint64_t* value); /** - * @brief 获取mds的某个metric的值 - * @param metricName metric的名子 - * @param[out] value metric的值,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the value of a metric for mds + * @param metricName The name of metric + * @param[out] value The value of metric is valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetMetric(const std::string& metricName, std::string* value); /** - * @brief 设置userName,访问namespace接口的时候调用 - * @param userName 用户名 + * @brief sets userName and calls it when accessing the namespace interface + * @param userName username */ void SetUserName(const std::string& userName) { userName_ = userName; } /** - * @brief 设置password,访问namespace接口的时候调用 - * @param password 密码 + * @brief sets the password and calls it when accessing the namespace interface + * @param 
password Password */ void SetPassword(const std::string& password) { password_ = password; } /** - * @brief 获取mds地址列表 - * @return mds地址的列表 + * @brief Get the list of mds addresses + * @return List of mds addresses */ virtual const std::vector& GetMdsAddrVec() const { return mdsAddrVec_; @@ -439,12 +439,12 @@ class MDSClient { } /** - * @brief 获取当前mds的地址 + * @brief Get the address of the current mds */ virtual std::vector GetCurrentMds(); /** - * @brief 向mds发送rpc触发快速leader均衡 + * @brief Send an RPC to mds to trigger rapid leader balancing */ virtual int RapidLeaderSchedule(PoolIdType lpid); @@ -457,19 +457,19 @@ class MDSClient { virtual int SetLogicalPoolScanState(PoolIdType lpid, bool scanEnable); /** - * @brief 获取mds在线状态, - * dummyserver在线且dummyserver记录的listen addr - * 与mds地址一致才认为在线 - * @param[out] onlineStatus mds在线状态,返回0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the online status of mds; + * an mds is considered online only when its dummyserver is online and + * the listen addr recorded by the dummyserver matches the mds address + * @param[out] onlineStatus mds online status, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual void GetMdsOnlineStatus(std::map* onlineStatus); /** - * @brief 获取指定chunkserver的恢复状态 - * @param[in] cs 需要查询的chunkserver列表 - * @param[out] statusMap 返回各chunkserver对应的恢复状态 - * @return 成功返回0,失败返回-1 + * @brief Get the recovery status of the specified chunkservers + * @param[in] cs List of chunkservers to query + * @param[out] statusMap The recovery status corresponding to each chunkserver + * @return returns 0 for success, -1 for failure */ int QueryChunkServerRecoverStatus( const std::vector& cs, @@ -482,15 +482,15 @@ class MDSClient { private: /** - * @brief 切换mds - * @return 切换成功返回true,所有mds都失败则返回false + * @brief Switch to another mds + * @return Returns true if the switch succeeds, false if all mds fail */ bool ChangeMDServer(); /** - * @brief 向mds发送RPC,为了复用代码 - * @param - * @return 成功返回0,失败返回-1 + * @brief Send an RPC to mds; extracted for code reuse + * @param + * @return returns 0 for success, -1 for failure */ template int SendRpcToMds(Request* request, Response* response, T* obp, @@ -499,69 +499,69 @@ class MDSClient { google::protobuf::Closure*)); /** - * @brief 获取server上的chunkserver的列表 - * @param request 要发送的request - * @param[out] chunkservers chunkserver信息的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get a list of chunkservers on the server + * @param request The request to be sent + * @param[out] chunkservers A list of chunkserver information, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ int ListChunkServersOnServer(ListChunkServerRequest* request, std::vector* chunkservers); /** - * @brief 获取chunkserver的详细信息 - * @param request 要发送的request - * @param[out] chunkserver chunkserver的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get detailed information about the chunkserver + * @param request The request to be sent + * @param[out] chunkserver Detailed information of the chunkserver, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ int GetChunkServerInfo(GetChunkServerInfoRequest* request, ChunkServerInfo* chunkserver); /** - * @brief 获取chunkserver的详细信息 - * @param request 要发送的request - * @param[out] copysets chunkserver上copyset的详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get detailed information about the chunkserver + * @param request The request to be sent + * @param[out] copysets Details of the copysets on the chunkserver, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ int GetCopySetsInChunkServer( GetCopySetsInChunkServerRequest* request, std::vector* copysets); /** - * @brief 初始化dummy server地址 - * @param dummyPort dummy server端口列表 - * @return 成功返回0,失败返回-1 + * @brief Initialize the dummy server addresses + * @param dummyPort dummy server port list + * @return returns 0 for success, -1 for failure */ int InitDummyServerMap(const std::string& dummyPort); /** - * @brief 通过dummyServer获取mds的监听地址 - * @param dummyAddr dummyServer的地址 - * @param[out] listenAddr mds的监听地址 - * @return 成功返回0,失败返回-1 + * @brief Get the listening address of mds through the dummyServer + * @param dummyAddr Address of the dummyServer + * @param[out] listenAddr The listening address of mds + * @return returns 0 for success, -1 for failure */ int GetListenAddrFromDummyPort(const std::string& dummyAddr, std::string* listenAddr); - // 填充signature + // Fill in the signature template void FillUserInfo(T* request); - // 用于发送http请求的client + // Client used to send HTTP requests MetricClient metricClient_; - // 向mds发送RPC的channel + // Channel for sending RPCs to mds brpc::Channel channel_; - // 保存mds地址的vector + // Vector holding the mds addresses std::vector mdsAddrVec_; - // 保存mds地址对应的dummy server的地址 + // Addresses of the dummy servers corresponding to the mds addresses std::map dummyServerMap_; - // 保存当前mds在mdsAddrVec_中的索引 + // Index of the current mds in mdsAddrVec_ int currentMdsIndex_; - // 用户名 + // Username std::string userName_; - // 密码 + // Password std::string password_; - // 避免重复初始化 + // Avoid duplicate initialization bool isInited_; }; } // namespace tool diff --git a/src/tools/metric_client.cpp b/src/tools/metric_client.cpp index 776347f738..686ff75171 100644 --- a/src/tools/metric_client.cpp +++ b/src/tools/metric_client.cpp @@ -70,7 +70,7 @@ MetricRet MetricClient::GetMetric(const std::string &addr, res = GetValueFromAttachment(attachment, value); return (res == 0) ? MetricRet::kOK : MetricRet::kOtherErr; } - // 这里不输出错误,因为对mds有切换的可能,把打印的处理交给外部 + // Do not print the error here, since the mds may have switched; leave the printing to the caller bool notExist = cntl.ErrorCode() == brpc::EHTTP && cntl.http_response().status_code() == kHttpCodeNotFound; return notExist ? 
MetricRet::kNotFound : MetricRet::kOtherErr; diff --git a/src/tools/metric_client.h b/src/tools/metric_client.h index 94e29a545f..b4fbb28e8b 100644 --- a/src/tools/metric_client.h +++ b/src/tools/metric_client.h @@ -35,11 +35,11 @@ namespace curve { namespace tool { enum class MetricRet { - // 成功 + // Success kOK = 0, - // metric未找到 + // Metric not found kNotFound = -1, - // 其他错误 + // Other errors kOtherErr = -2, }; @@ -50,40 +50,40 @@ class MetricClient { virtual ~MetricClient() {} /** - * @brief 从指定地址获取metric - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + * @brief Get metric from specified address + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetric(const std::string& addr, const std::string& metricName, std::string* value); /** - * @brief 从指定地址获取metric,并转换成uint - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] value metric的值 - * @return 错误码 + * @brief retrieves metric from the specified address and converts it to uint + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] value The value of metric + * @return error code */ virtual MetricRet GetMetricUint(const std::string& addr, const std::string& metricName, uint64_t* value); /** - * @brief 从metric获取配置的值 - * @param addr 要访问的地址 - * @param metricName 要获取的metric name - * @param[out] confValue metric中配置的值 - * @return 错误码 + * @brief Get the configured value from metric + * @param addr Address to access + * @param metricName The metric name to obtain + * @param[out] confValue The value configured in metric + * @return error code */ virtual MetricRet GetConfValueFromMetric(const std::string& addr, const std::string& metricName, std::string* confValue); private: - // 从response attachment解析出metric值 + // Parse the metric value from the response attachment int GetValueFromAttachment(const std::string& attachment, std::string* value); }; diff --git a/src/tools/metric_name.h b/src/tools/metric_name.h index d284694aba..f798d58bb9 100644 --- a/src/tools/metric_name.h +++ b/src/tools/metric_name.h @@ -50,7 +50,7 @@ const char kSechduleOpMetricpPrefix[] = "mds_scheduler_metric_"; const char kMdsListenAddrMetricName[] = "mds_config_mds_listen_addr"; const char kMdsStatusMetricName[] = "mds_status"; const char kMdsStatusLeader[] = "leader"; -// operator名称 +// operator Name const char kTotalOpName[] = "operator"; const char kChangeOpName[] = "change_peer"; const char kAddOpName[] = "add_peer"; diff --git a/src/tools/namespace_tool.cpp b/src/tools/namespace_tool.cpp index 8d6119b75d..f5565fd31e 100644 --- a/src/tools/namespace_tool.cpp +++ b/src/tools/namespace_tool.cpp @@ -77,7 +77,7 @@ bool NameSpaceTool::SupportCommand(const std::string& command) { || command == kListPoolsets); } -// 根据命令行参数选择对应的操作 +// Select the corresponding operation based on command line parameters int NameSpaceTool::RunCommand(const std::string &cmd) { if (Init() != 0) { std::cout << "Init NameSpaceTool failed" << std::endl; @@ -92,7 +92,7 @@ int NameSpaceTool::RunCommand(const std::string &cmd) { } else if (cmd == kSegInfoCmd) { return PrintSegmentInfo(fileName); } else if (cmd == kDeleteCmd) { - // 单元测试不判断输入 + // Unit testing does not judge input if (FLAGS_isTest) { return core_->DeleteFile(fileName, FLAGS_forcedelete); } @@ -204,7 +204,7 @@ int NameSpaceTool::PrintFileInfoAndActualSize(const std::string& fullName, const 
FileInfo& fileInfo) { PrintFileInfo(fileInfo); int ret = GetAndPrintAllocSize(fullName); - // 如果是目录的话,计算目录中的文件大小(用户创建时指定的) + // If it is a directory, calculate the size of the files in the directory (the size specified by the user at creation) if (fileInfo.filetype() == curve::mds::FileType::INODE_DIRECTORY) { ret = GetAndPrintFileSize(fullName); } @@ -255,14 +255,14 @@ void NameSpaceTool::PrintFileInfo(const FileInfo& fileInfo) { curve::common::SplitString(fileInfoStr, "\n", &items); for (const auto& item : items) { if (item.compare(0, 5, "ctime") == 0) { - // ctime是微妙,打印的时候只打印到秒 + // ctime is in microseconds; only print down to the second time_t ctime = fileInfo.ctime() / 1000000; std::string standard; curve::common::TimeUtility::TimeStampToStandard(ctime, &standard); std::cout << "ctime: " << standard << std::endl; continue; } - // 把length转换成GB + // Convert length to GB if (item.compare(0, 6, "length") == 0) { uint64_t length = fileInfo.length(); double fileSize = static_cast(length) / curve::mds::kGB; @@ -400,7 +400,7 @@ int NameSpaceTool::PrintChunkLocation(const std::string& fileName, } void NameSpaceTool::TrimEndingSlash(std::string* fileName) { - // 如果最后面有/,去掉 + // If there is a trailing '/', remove it if (fileName->size() > 1 && fileName->back() == '/') { fileName->pop_back(); } diff --git a/src/tools/namespace_tool.h b/src/tools/namespace_tool.h index 1af7f8ca8f..7c4d418d7e 100644 --- a/src/tools/namespace_tool.h +++ b/src/tools/namespace_tool.h @@ -56,67 +56,67 @@ class NameSpaceTool : public CurveTool { core_(core), inited_(false) {} /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief Print usage + * @param command: The command to query + * @return None */ void PrintHelp(const std::string &command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string &command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: - // 初始化 + // Initialize int Init(); - // 打印fileInfo和文件占用的实际空间 + // Print fileInfo and the actual space occupied by the file int PrintFileInfoAndActualSize(const std::string& fileName); - // 打印fileInfo和文件占用的实际空间 + // Print fileInfo and the actual space occupied by the file int PrintFileInfoAndActualSize(const std::string& fullName, const FileInfo& fileInfo); - // 打印目录中的文件信息 + // Print file information in the directory int PrintListDir(const std::string& dirName); - // 打印出文件的segment信息 + // Print out the segment information of the file int PrintSegmentInfo(const std::string &fileName); - // 打印fileInfo,把时间转化为易读的格式输出 + // Print fileInfo and convert the time into a readable format for output void PrintFileInfo(const FileInfo& fileInfo); - // 打印PageFileSegment,把同一个chunk的信息打在同一行 + // Print the PageFileSegment, putting the information of the same chunk on the same line void PrintSegment(const PageFileSegment& segment); - // 打印chunk的位置信息 + // Print the location information of the chunk int PrintChunkLocation(const std::string& fileName, uint64_t offset); - // 打印文件的分配大小 + // Print the allocated size of the file int GetAndPrintAllocSize(const std::string& fileName); - // 打印目录的file size + // Print the file size of the directory int GetAndPrintFileSize(const std::string& fileName); - // 目前curve mds不支持/test/格式的文件名,需要把末尾的/去掉 + // Currently, curve mds does not support file names like /test/, so the trailing '/' needs to be removed void TrimEndingSlash(std::string* fileName); int PrintPoolsets(); private: - // 核心逻辑 + // Core logic std::shared_ptr core_; - // 是否初始化成功过 + // Whether initialization has succeeded bool inited_; }; } // namespace tool diff --git a/src/tools/namespace_tool_core.cpp b/src/tools/namespace_tool_core.cpp index b69a6ecacc..ffe694507f 100644 --- a/src/tools/namespace_tool_core.cpp +++ b/src/tools/namespace_tool_core.cpp @@ -98,13 +98,13 @@ int NameSpaceToolCore::GetFileSegments(const std::string& fileName, int NameSpaceToolCore::GetFileSegments(const std::string& fileName, const FileInfo& fileInfo, std::vector* segments) { - // 只能获取page file的segment + // Only segments of page files can be obtained if (fileInfo.filetype() != curve::mds::FileType::INODE_PAGEFILE) { std::cout << "It is not a page file!" << std::endl; return -1; } - // 获取文件的segment数,并打印每个segment的详细信息 + // Obtain the number of segments in the file and print detailed information for each segment uint64_t segmentNum = fileInfo.length() / fileInfo.segmentsize(); uint64_t segmentSize = fileInfo.segmentsize(); for (uint64_t i = 0; i < segmentNum; i++) { @@ -117,7 +117,7 @@ int NameSpaceToolCore::GetFileSegments(const std::string& fileName, } else if (res == GetSegmentRes::kSegmentNotAllocated) { continue; } else if (res == GetSegmentRes::kFileNotExists) { - // 查询过程中文件被删掉了,清空segment并返回0 + // The file was deleted during the query; clear the segments and return 0 segments->clear(); return 0; } else { @@ -229,7 +229,7 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, return -1; } uint64_t segmentSize = fileInfo.segmentsize(); - // segment对齐的offset + // Segment-aligned offset uint64_t segOffset = (offset / segmentSize) * segmentSize; PageFileSegment segment; GetSegmentRes segRes = client_->GetSegmentInfo(fileName, @@ -243,7 +243,7 @@ int NameSpaceToolCore::QueryChunkCopyset(const std::string& fileName, return -1; } } - // 在segment里面的chunk的索引 + // Index of the chunk within the segment if (segment.chunksize() == 0) { std::cout << "No chunks in segment!" 
<< std::endl; return -1; diff --git a/src/tools/namespace_tool_core.h b/src/tools/namespace_tool_core.h index febf0882f8..a6209395a1 100644 --- a/src/tools/namespace_tool_core.h +++ b/src/tools/namespace_tool_core.h @@ -60,103 +60,103 @@ class NameSpaceToolCore { virtual ~NameSpaceToolCore() = default; /** - * @brief 初始化mds client - * @param mdsAddr mds的地址,支持多地址,用","分隔 - * @return 成功返回0,失败返回-1 + * @brief Initialize mds client + * @param mdsAddr Address of mds, supporting multiple addresses separated by ',' + * @return returns 0 for success, -1 for failure */ virtual int Init(const std::string& mdsAddr); /** - * @brief 获取文件fileInfo - * @param fileName 文件名 - * @param[out] fileInfo 文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get file fileInfo + * @param fileName File name + * @param[out] fileInfo file fileInfo, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetFileInfo(const std::string& fileName, FileInfo* fileInfo); /** - * @brief 将目录下所有的fileInfo列出来 - * @param dirName 目录名 - * @param[out] files 目录下的所有文件fileInfo,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief List all fileInfo in the directory + * @param dirName directory name + * @param[out] files All fileInfo files in the directory are valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int ListDir(const std::string& dirName, std::vector* files); /** - * @brief 获取copyset中的chunkserver列表 - * @param logicalPoolId 逻辑池id - * @param copysetId copyset id - * @param[out] csLocs chunkserver位置的列表,返回值为0时有效 - * @return 成功返回0,失败返回-1 + * @brief Get the list of chunkservers in the copyset + * @param logicalPoolId Logical Pool ID + * @param copysetId copyset ID + * @param[out] csLocs List of chunkserver locations, valid when the return value is 0 + * @return returns 0 for success, -1 for failure */ virtual int GetChunkServerListInCopySet(const PoolIdType& logicalPoolId, const CopySetIdType& copysetId, std::vector* csLocs); /** - * @brief 删除文件 - * @param fileName 文件名 - * @param forcedelete 是否强制删除 - * @return 成功返回0,失败返回-1 + * @brief Delete file + * @param fileName File name + * @param forcedelete: Do you want to force deletion + * @return returns 0 for success, -1 for failure */ virtual int DeleteFile(const std::string& fileName, bool forcedelete = false); /** - * @brief create pageFile or directory - * @param fileName file name or dir name - * @param length 文件长度 - * @param normalFile is file or dir - * @param stripeUnit stripe unit size - * @param stripeCount the amount of stripes - * @return 成功返回0,失败返回-1 + * @brief create pageFile or directory + * @param fileName file name or dir name + * @param length File length + * @param normalFile is file or dir + * @param stripeUnit stripe unit size + * @param stripeCount the amount of stripes + * @return returns 0 for success, -1 for failure */ virtual int CreateFile(const CreateFileContext& ctx); /** - * @brief 扩容卷 - * @param fileName 文件名 - * @param newSize 扩容后的文件长度 - * @return 成功返回0,失败返回-1 + * @brief expansion volume + * @param fileName File name + * @param newSize The file length after expansion + * @return returns 0 for success, -1 for failure */ virtual int ExtendVolume(const std::string& fileName, uint64_t newSize); /** - * @brief 计算文件或目录实际分配的空间 - * @param fileName 文件名 - * @param[out] allocSize 文件或目录已分配大小,返回值为0是有效 - * @param[out] allocMap 在每个池子的分配量,返回值0时有效 - * @return 成功返回0,失败返回-1 + * @brief Calculate the actual allocated space of a file or directory + * @param fileName File name + * @param[out] allocSize The 
file or directory has already been allocated a size, and a return value of 0 is valid + * @param[out] allocMap The allocation amount of each pool, valid when returning a value of 0 + * @return returns 0 for success, -1 for failure */ virtual int GetAllocatedSize(const std::string& fileName, uint64_t* allocSize, AllocMap* allocMap = nullptr); /** - * @brief 返回文件或目录的中的文件的用户申请的大小 - * @param fileName 文件名 - * @param[out] fileSize 文件或目录中用户申请的大小,返回值为0是有效 - * @return 成功返回0,失败返回-1 + * @brief Returns the user requested size of files in a file or directory + * @param fileName File name + * @param[out] fileSize The size requested by the user in the file or directory, with a return value of 0 being valid + * @return returns 0 for success, -1 for failure */ virtual int GetFileSize(const std::string& fileName, uint64_t* fileSize); /** - * @brief 获取文件的segment信息并输出到segments里面 - * @param fileName 文件名 - * @param[out] segments 文件segment的列表 - * @return 返回文件实际分配大小,失败则为-1 + * @brief Get the segment information of the file and output it to segments + * @param fileName File name + * @param[out] segments List of segments in the file + * @return returns the actual allocated size of the file, if it fails, it will be -1 */ virtual int GetFileSegments(const std::string& fileName, std::vector* segments); /** - * @brief 查询offset对应的chunk的id和所属的copyset - * @param fileName 文件名 - * @param offset 文件中的偏移 - * @param[out] chunkId chunkId,返回值为0时有效 - * @param[out] copyset chunk对应的copyset,是logicalPoolId和copysetId的pair - * @return 成功返回0,失败返回-1 + * @brief: Query the ID of the chunk corresponding to the offset and the copyset it belongs to + * @param fileName File name + * @param offset Offset in file + * @param[out] chunkId chunkId, valid when the return value is 0 + * @param[out] copyset The copyset corresponding to the chunk is the pair of logicalPoolId and copysetId + * @return returns 0 for success, -1 for failure */ virtual int QueryChunkCopyset(const std::string& fileName, uint64_t offset, uint64_t* chunkId, @@ -182,17 +182,17 @@ class NameSpaceToolCore { private: /** - * @brief 获取文件的segment信息并输出到segments里面 - * @param fileName 文件名 - * @param fileInfo 文件的fileInfo - * @param[out] segments 文件segment的列表 - * @return 返回文件实际分配大小,失败则为-1 + * @brief Get the segment information of the file and output it to segments + * @param fileName File name + * @param fileInfo The fileInfo of the file + * @param[out] segments List of segments in the file + * @return returns the actual allocated size of the file, if it fails, it will be -1 */ int GetFileSegments(const std::string& fileName, const FileInfo& fileInfo, std::vector* segments); - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr client_; }; } // namespace tool diff --git a/src/tools/raft_log_tool.cpp b/src/tools/raft_log_tool.cpp index a4fb97e142..647ddc43f4 100644 --- a/src/tools/raft_log_tool.cpp +++ b/src/tools/raft_log_tool.cpp @@ -87,7 +87,7 @@ bool RaftLogTool::SupportCommand(const std::string& cmd) { } int RaftLogTool::PrintHeaders(const std::string& fileName) { - // 从文件名获取firstIndex + // Get firstIndex from file name int64_t firstIndex; int res = ParseFirstIndexFromFileName(fileName, &firstIndex); if (res != 0) { diff --git a/src/tools/raft_log_tool.h b/src/tools/raft_log_tool.h index d056608bb9..1a6b7bc75b 100644 --- a/src/tools/raft_log_tool.h +++ b/src/tools/raft_log_tool.h @@ -60,37 +60,37 @@ class SegmentParser { localFS_(localFS) {} /** - * @brief 初始化 - * @param fileName segmnet文件的文件名 - * @return 获取成功返回0,失败返回-1 + * @brief initialization + * @param fileName The 
file name of the segmnet file + * @return returns 0 if successful, -1 if unsuccessful */ virtual int Init(const std::string& fileName); /** - * @brief 反初始化 + * @brief deinitialization */ virtual void UnInit(); /** - * @brief 获取下一个EntryHeader - * @param[out] header log entry header - * @return 获取成功返回true,失败返回false + * @brief Get the next EntryHeader + * @param[out] header log entry header + * @return returns true for success, false for failure */ virtual bool GetNextEntryHeader(EntryHeader* header); /** - * @brief 判断读取是否成功完成 + * @brief Determine if the read was successfully completed */ virtual bool SuccessfullyFinished() { return off_ >= fileLen_; } private: - // 文件描述符 + // File Descriptor int fd_; - // 下一个Entry的偏移 + // Offset for the next Entry int64_t off_; - // 文件长度 + // File length int64_t fileLen_; std::shared_ptr localFS_; @@ -102,46 +102,46 @@ class RaftLogTool : public CurveTool { parser_(parser) {} /** - * @brief 执行命令 - * @param command 要执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command The command to be executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string& command) override; /** - * @brief 打印帮助信息 + * @brief Print help information */ void PrintHelp(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); private: /** - * @brief 打印文件中所有raft log的头部信息 - * @param fileName raft log文件名 - * @return 成功返回0,否则返回-1 + * @brief Print the header information of all raft logs in the file + * @param fileName raft log file name + * @return successfully returns 0, otherwise returns -1 */ int PrintHeaders(const std::string& fileName); /** - * @brief 从文件解析出entry header - * @param fd 文件描述符 - * @param offset 文件中的偏移 - * @param[out] head entry头部信息,返回值为0时有效 - * @return 成功返回0,否则返回-1 + * @brief Parse the entry header from the file + * @param fd file descriptor + * @param offset Offset in file + * @param[out] head entry header information, valid when the return value is 0 + * @return successfully returns 0, otherwise returns -1 */ int ParseEntryHeader(int fd, off_t offset, EntryHeader *head); /** - * @brief 从文件名解析first index - * @param fileName raft log文件名 - * @param[out] firstIndex segment文件包含的log entry的第一个index - * @return 成功返回0,否则返回-1 + * @brief Parsing first index from file name + * @param fileName raft log file name + * @param[out] firstIndex The first index of the log entry contained in the segment file + * @return successfully returns 0, otherwise returns -1 */ int ParseFirstIndexFromFileName(const std::string& fileName, int64_t* firstIndex); diff --git a/src/tools/schedule_tool.cpp b/src/tools/schedule_tool.cpp index 25cd976382..13c8d57738 100644 --- a/src/tools/schedule_tool.cpp +++ b/src/tools/schedule_tool.cpp @@ -109,7 +109,7 @@ int ScheduleTool::DoRapidLeaderSchedule() { } int ScheduleTool::ScheduleOne(PoolIdType lpoolId) { - // 给mds发送rpc + // Send rpc to mds int res = mdsClient_->RapidLeaderSchedule(lpoolId); if (res != 0) { std::cout << "RapidLeaderSchedule pool " << lpoolId diff --git a/src/tools/schedule_tool.h b/src/tools/schedule_tool.h index edc9bf44dc..d41b665857 100644 --- a/src/tools/schedule_tool.h +++ b/src/tools/schedule_tool.h @@ -39,36 +39,36 @@ class ScheduleTool : public CurveTool { : mdsClient_(mdsClient) {} /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + 
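
// Illustrative sketch only (not RaftLogTool's ParseFirstIndexFromFileName):
// braft segment files are conventionally named "log_inprogress_<firstIndex>"
// for the open segment and "log_<firstIndex>_<lastIndex>" for closed ones;
// assuming that convention, the first index can be recovered from the file
// name as below, returning 0 on success and -1 on failure to match the
// contract documented above.
#include <cstdint>
#include <cstdio>
#include <iostream>
#include <string>

int ParseFirstIndexSketch(const std::string& fileName, int64_t* firstIndex) {
    long long first = 0;
    long long last = 0;
    // Open (in-progress) segment: only the first index is encoded.
    if (std::sscanf(fileName.c_str(), "log_inprogress_%lld", &first) == 1) {
        *firstIndex = first;
        return 0;
    }
    // Closed segment: both first and last index are encoded.
    if (std::sscanf(fileName.c_str(), "log_%lld_%lld", &first, &last) == 2) {
        *firstIndex = first;
        return 0;
    }
    return -1;  // Not a recognized segment file name.
}

int main() {
    int64_t first = 0;
    if (ParseFirstIndexSketch("log_inprogress_00000000000000000021", &first) == 0) {
        std::cout << "first index: " << first << std::endl;  // prints 21
    }
    return 0;
}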
* @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ void PrintHelp(const std::string &command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string &command) override; private: /** - * @brief PrintRapidLeaderSchedule 打印rapid-leader-schdule的help信息 + * @brief PrintRapidLeaderSchedule Print help information for rapid-leader-schdule */ void PrintRapidLeaderScheduleHelp(); void PrintSetScanStateHelp(); /** - * @brief DoRapidLeaderSchedule 向mds发送rpc进行快速transfer leader + * @brief DoRapidLeaderSchedule sends rpc to mds for fast transfer leader */ int DoRapidLeaderSchedule(); diff --git a/src/tools/snapshot_check.h b/src/tools/snapshot_check.h index 87bf512758..7b3a7509c5 100644 --- a/src/tools/snapshot_check.h +++ b/src/tools/snapshot_check.h @@ -50,35 +50,35 @@ class SnapshotCheck : public CurveTool { /** - * @brief 打印用法 - * @param command:查询的命令 - * @return 无 + * @brief printing usage + * @param command: Query command + * @return None */ void PrintHelp(const std::string &command) override; /** - * @brief 执行命令 - * @param command:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param command: The command executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string &command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string& command); /** - * @brief 比较文件和快照的一致性 - * @return 成功返回0,失败返回-1 + * @brief Compare file and snapshot consistency + * @return returns 0 for success, -1 for failure */ int Check(); private: /** - * 初始化 + * Initialize */ int Init(); diff --git a/src/tools/snapshot_clone_client.cpp b/src/tools/snapshot_clone_client.cpp index 2b8be3c739..f40e0755d0 100644 --- a/src/tools/snapshot_clone_client.cpp +++ b/src/tools/snapshot_clone_client.cpp @@ -48,7 +48,7 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) { std::cout << "split dummy server fail!" 
<< std::endl; return -1; } - // 只指定了一个端口,对所有mds采用这个端口 + // Only one port has been specified, and this port is used for all mds if (dummyPortVec.size() == 1) { for (uint64_t i = 0; i < serverAddrVec_.size() - 1; ++i) { dummyPortVec.emplace_back(dummyPortVec[0]); @@ -77,7 +77,7 @@ int SnapshotCloneClient::InitDummyServerMap(const std::string& dummyPort) { std::vector SnapshotCloneClient::GetActiveAddrs() { std::vector activeAddrs; for (const auto &item : dummyServerMap_) { - // 获取status来判断正在服务的地址 + // Obtain status to determine the address being served std::string status; MetricRet ret = metricClient_->GetMetric(item.second, kSnapshotCloneStatusMetricName, &status); @@ -87,7 +87,7 @@ std::vector SnapshotCloneClient::GetActiveAddrs() { continue; } if (status == kSnapshotCloneStatusActive) { - // 如果是active状态,再访问一下服务端口 + // If it is in an active state, please visit the service port again MetricRet ret = metricClient_->GetMetric(item.first, kSnapshotCloneStatusMetricName, &status); if (ret != MetricRet::kOK) { @@ -107,7 +107,7 @@ void SnapshotCloneClient::GetOnlineStatus( for (const auto &item : dummyServerMap_) { std::string listenAddr; int res = GetListenAddrFromDummyPort(item.second, &listenAddr); - // 如果获取到的监听地址与记录的mds地址不一致,也认为不在线 + // If the obtained listening address does not match the recorded MDS address, it is also considered offline if (res != 0 || listenAddr != item.first) { onlineStatus->emplace(item.first, false); continue; diff --git a/src/tools/snapshot_clone_client.h b/src/tools/snapshot_clone_client.h index 295134bd50..6d09930ec6 100644 --- a/src/tools/snapshot_clone_client.h +++ b/src/tools/snapshot_clone_client.h @@ -41,30 +41,30 @@ class SnapshotCloneClient { virtual ~SnapshotCloneClient() = default; /** - * @brief 初始化,从字符串解析出地址和dummy port - * @param serverAddr snapshot clone server的地址,支持多地址,用","分隔 - * @param dummyPort dummy port列表,只输入一个的话 - * 所有server用同样的dummy port,用字符串分隔有多个的话 - * 为每个server设置不同的dummy port - * @return - * success: 0 - * failed: -1 - * no snapshot server: 1 + * @brief initialization, parsing the address and dummy port from the string + * @param serverAddr Address of snapshot clone server, supporting multiple addresses separated by ',' + * @param dummyPort dummyPort list, if only one is entered + * All servers use the same dummy port, separated by strings if there are multiple + * Set different dummy ports for each server + * @return + * Success: 0 + * Failed: -1 + * No snapshot server: 1 * */ virtual int Init(const std::string& serverAddr, const std::string& dummyPort); /** - * @brief 获取当前服务的snapshot clone server的地址 + * @brief Get the address of the snapshot clone server for the current service */ virtual std::vector GetActiveAddrs(); /** - * @brief 获取snapshot clone server的在线状态 - * dummyserver在线且dummyserver记录的listen addr - * 与服务地址一致才认为在线 - * @param[out] onlineStatus 每个节点的在线状态 + * @brief Get the online status of the snapshot clone server + * dummyserver is online and the dummyserver records a listen addr + * Only when consistent with the service address is considered online + * @param[out] onlineStatus The online status of each node */ virtual void GetOnlineStatus(std::map* onlineStatus); @@ -75,27 +75,27 @@ class SnapshotCloneClient { private: /** - * @brief 初始化dummy server地址 - * @param dummyPort dummy server端口列表 - * @return 成功返回0,失败返回-1 + * @brief Initialize dummy server address + * @param dummyPort dummy server port list + * @return returns 0 for success, -1 for failure */ int InitDummyServerMap(const std::string& dummyPort); /** - * @brief 
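
// Illustrative sketch (an assumed standalone helper, not the
// SnapshotCloneClient code): expand a comma-separated dummy-port list against
// the server address list following the rule documented above -- a single
// dummy port applies to every server, otherwise the list must supply exactly
// one port per server.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::vector<std::string> Split(const std::string& s, char sep) {
    std::vector<std::string> out;
    std::stringstream ss(s);
    std::string item;
    while (std::getline(ss, item, sep)) {
        out.push_back(item);
    }
    return out;
}

// Returns 0 on success and fills portPerServer, -1 if the counts cannot match.
int ExpandDummyPorts(const std::vector<std::string>& servers,
                     const std::string& dummyPorts,
                     std::vector<std::string>* portPerServer) {
    std::vector<std::string> ports = Split(dummyPorts, ',');
    if (ports.size() == 1) {
        ports.resize(servers.size(), ports[0]);  // one port reused for all servers
    }
    if (ports.size() != servers.size()) {
        return -1;  // neither "one for all" nor "one per server"
    }
    *portPerServer = ports;
    return 0;
}

int main() {
    std::vector<std::string> servers = {"127.0.0.1:5555", "127.0.0.1:5556"};
    std::vector<std::string> ports;
    if (ExpandDummyPorts(servers, "8081", &ports) == 0) {
        for (size_t i = 0; i < servers.size(); ++i) {
            std::cout << servers[i] << " -> dummy port " << ports[i] << "\n";
        }
    }
    return 0;
}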
通过dummyServer获取server的监听地址 - * @param dummyAddr dummyServer的地址 - * @param[out] listenAddr 服务地址 - * @return 成功返回0,失败返回-1 + * @brief: Obtain the listening address of the server through dummyServer + * @param dummyAddr Address of dummyServer + * @param[out] listenAddr service address + * @return returns 0 for success, -1 for failure */ int GetListenAddrFromDummyPort(const std::string& dummyAddr, std::string* listenAddr); private: - // 用于获取metric + // Used to obtain metric std::shared_ptr metricClient_; - // 保存server地址的vector + // Save the vector of the server address std::vector serverAddrVec_; - // 保存server地址对应的dummy server的地址 + // Save the address of the dummy server corresponding to the server address std::map dummyServerMap_; }; diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index e6bfc116a4..526e998a71 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -253,7 +253,7 @@ int StatusTool::ChunkServerListCmd() { double unhealthyRatio = 0.0; if (FLAGS_checkCSAlive) { - // 发RPC重置online状态 + // Send RPC to reset online status std::string csAddr = chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); @@ -495,24 +495,24 @@ int StatusTool::PrintClusterStatus() { bool StatusTool::IsClusterHeatlhy() { bool ret = true; - // 1、检查copyset健康状态 + // 1. Check the health status of copyset int res = copysetCheckCore_->CheckCopysetsInCluster(); if (res != 0) { std::cout << "Copysets are not healthy!" << std::endl; ret = false; } - // 2、检查mds状态 + // 2. Check the mds status if (!CheckServiceHealthy(ServiceName::kMds)) { ret = false; } - // 3、检查etcd在线状态 + // 3. Check the online status of ETCD if (!CheckServiceHealthy(ServiceName::kEtcd)) { ret = false; } - // 4、检查snapshot clone server状态 + // 4. Check the status of the snapshot clone server if (!noSnapshotServer_ && !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) { ret = false; @@ -1054,7 +1054,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { std::cout << "Get root directory file size from mds fail!" 
<< std::endl; return -1; } - // 从metric获取space信息 + // Obtain space information from metric for (const auto& lgPool : lgPools) { LogicalpoolSpaceInfo lpinfo; std::string poolName = lgPool.logicalpoolname(); @@ -1096,7 +1096,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { std::pair( lgPool.logicalpoolid(), lpinfo)); } - // 获取RecycleBin的分配大小 + // Obtain the allocation size of RecycleBin res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &spaceInfo->recycleAllocSize); if (res != 0) { diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index 2b54d70943..3d02be0355 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -63,22 +63,22 @@ struct LogicalpoolSpaceInfo { std::string poolName = ""; uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; }; struct SpaceInfo { uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; - // recycleBin的分配大小 + // Allocation size of recycleBin uint64_t recycleAllocSize = 0; - // 系统中存在的文件大小 + // File size present in the system uint64_t currentFileSize = 0; std::unordered_map lpoolspaceinfo; }; @@ -107,28 +107,28 @@ class StatusTool : public CurveTool { ~StatusTool() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ void PrintHelp(const std::string &command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ int RunCommand(const std::string &command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief returns whether the command is supported + * @param command: The command executed + * @return true/false */ static bool SupportCommand(const std::string &command); /** - * @brief 判断集群是否健康 + * @brief to determine whether the cluster is healthy */ bool IsClusterHeatlhy(); @@ -156,62 +156,62 @@ class StatusTool : public CurveTool { int PrintSnapshotCloneStatus(); /** - * @brief 判断命令是否需要和etcd交互 - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief to determine if the command needs to interact with ETCD + * @param command: The command executed + * @return needs to return true, otherwise it will return false */ bool CommandNeedEtcd(const std::string &command); /** - * @brief 判断命令是否需要mds - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief to determine if the command requires mds + * @param command: The command executed + * @return needs to return true, otherwise it will return false */ bool CommandNeedMds(const std::string &command); /** - * @brief 判断命令是否需要snapshot clone server - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief: Determine if the command requires a snapshot clone server + * @param command: The command executed + * @return needs to return true, otherwise it will return false */ bool CommandNeedSnapshotClone(const std::string &command); /** - * @brief 打印在线状态 - * @param name : 在线状态对应的名字 - * @param onlineStatus 在线状态的map + * @brief Print online status + * @param name: The name corresponding to the online status + * @param onlineStatus Map of online status */ void 
PrintOnlineStatus(const std::string &name, const std::map &onlineStatus); /** - * @brief 获取并打印mds version信息 + * @brief Get and print mds version information */ int GetAndPrintMdsVersion(); /** - * @brief 检查服务是否健康 - * @param name 服务名 + * @brief Check if the service is healthy + * @param name Service Name */ bool CheckServiceHealthy(const ServiceName &name); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // Copyset检查工具,用于检查集群和chunkserver的健康状态 + // Copyset checking tool, used to check the health status of clusters and chunkservers std::shared_ptr copysetCheckCore_; - // etcd client,用于调etcd API获取状态 + // ETCD client, used to call the ETCD API to obtain status std::shared_ptr etcdClient_; - // 用于获取metric + // Used to obtain metric std::shared_ptr metricClient_; - // 用于获取snapshot clone的状态 + // Used to obtain the status of snapshot clones std::shared_ptr snapshotClient_; - // version client,用于获取version信息 + // Version client, used to obtain version information std::shared_ptr versionTool_; - // mds是否初始化过 + // Has the mds been initialized bool mdsInited_; - // etcd是否初始化过 + // Has ETCD been initialized bool etcdInited_; // Is there a snapshot service or not bool noSnapshotServer_; diff --git a/src/tools/version_tool.cpp b/src/tools/version_tool.cpp index 6e519bab4a..596babf669 100644 --- a/src/tools/version_tool.cpp +++ b/src/tools/version_tool.cpp @@ -159,7 +159,7 @@ void VersionTool::GetVersionMap(const std::vector& addrVec, MetricRet res = metricClient_->GetMetric(addr, kCurveVersionMetricName, &version); if (res != MetricRet::kOK) { - // 0.0.5.2版本之前没有curve_version的metric,因此再判断一下 + // Before version 0.0.5.2, there was no "curve_version" metric, so let's double-check. if (res == MetricRet::kNotFound) { version = kOldVersion; } else { diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h index 9231d1e4fc..16eb154b85 100644 --- a/src/tools/version_tool.h +++ b/src/tools/version_tool.h @@ -54,90 +54,90 @@ class VersionTool { virtual ~VersionTool() {} /** - * @brief 获取mds的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of mds and check version consistency + * @param[out] version version + * @return returns 0 for success, -1 for failure */ virtual int GetAndCheckMdsVersion(std::string *version, std::vector *failedList); /** - * @brief 获取chunkserver的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of chunkserver and check version consistency + * @param[out] version version + * @return returns 0 for success, -1 for failure */ virtual int GetAndCheckChunkServerVersion(std::string *version, std::vector *failedList); /** - * @brief 获取snapshot clone server的版本 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the snapshot clone server + * @param[out] version version + * @return returns 0 for success, -1 for failure */ virtual int GetAndCheckSnapshotCloneVersion(std::string *version, std::vector *failedList); /** - * @brief 获取client的版本 - * @param[out] versionMap process->版本->地址的映射表 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the client + * @param[out] versionMap process ->Version ->Address mapping table + * @return returns 0 for success, -1 for failure */ virtual int GetClientVersion(ClientVersionMapType *versionMap); /** - * @brief 打印每个version对应的地址 - * @param versionMap version到地址列表的map + * @brief Print the address corresponding to each version + * @param versionMap version to address list map */ static void PrintVersionMap(const 
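
// Illustrative sketch of the version fallback described above (standalone,
// not the VersionTool code): a service built before curve_version existed
// reports the metric as missing, so "not found" is mapped to an old-version
// sentinel rather than treated as a failure. The kOldVersion value here is a
// placeholder, not necessarily the constant the tool actually uses.
#include <iostream>
#include <string>

enum class MetricRet { kOK, kNotFound, kOtherErr };

const char kOldVersion[] = "before-0.0.5.2";  // placeholder sentinel

// Returns true if a usable version string was produced.
bool ResolveVersion(MetricRet res, const std::string& fetched,
                    std::string* version) {
    if (res == MetricRet::kOK) {
        *version = fetched;
        return true;
    }
    if (res == MetricRet::kNotFound) {
        *version = kOldVersion;  // old binary without the curve_version metric
        return true;
    }
    return false;  // real failure: the caller records the address as failed
}

int main() {
    std::string v;
    ResolveVersion(MetricRet::kNotFound, "", &v);
    std::cout << v << std::endl;  // prints the old-version sentinel
    return 0;
}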
VersionMapType &versionMap); /** - * @brief 打印访问失败的地址 - * @param failedList 访问失败的地址列表 + * @brief Print access failed addresses + * @param failedList Access Failed Address List */ static void PrintFailedList(const std::vector &failedList); private: /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] versionMap version到地址的map - * @param[out] failedList 查询version失败的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and store the version and address correspondence in the map + * @param addrVec Address List + * @param[out] versionMap version to address map + * @param[out] failedList Query address list for version failure */ void GetVersionMap(const std::vector &addrVec, VersionMapType *versionMap, std::vector *failedList); /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] processMap 不同的process对应的client的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and store the version and address correspondence in the map + * @param addrVec Address List + * @param[out] processMap The address list of clients corresponding to different processes */ void FetchClientProcessMap(const std::vector &addrVec, ProcessMapType *processMap); /** - * @brief 从启动server的命令行获取对应的程序的名字 - * 比如nebd的命令行为 + * @brief Get the name of the corresponding program from the command line of starting the server + * For example, the command behavior of nebd * process_cmdline : "/usr/bin/nebd-server * -confPath=/etc/nebd/nebd-server.conf * -log_dir=/data/log/nebd/server * -graceful_quit_on_sigterm=true * -stderrthreshold=3 * " - * 那么我们要解析出的名字是nebd-server - * @param addrVec 地址列表 - * @return 进程的名字 + * So the name we need to resolve is nebd server + * @param addrVec Address List + * @return The name of the process */ std::string GetProcessNameFromCmd(const std::string &cmd); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // 用于获取snapshotClone状态 + // Used to obtain snapshot clone status std::shared_ptr snapshotClient_; - // 获取metric的client + // Obtain metric clients std::shared_ptr metricClient_; }; diff --git a/test/chunkserver/braft_cli_service2_test.cpp b/test/chunkserver/braft_cli_service2_test.cpp index cc97980aa2..00781d15e6 100644 --- a/test/chunkserver/braft_cli_service2_test.cpp +++ b/test/chunkserver/braft_cli_service2_test.cpp @@ -78,7 +78,7 @@ class BraftCliService2Test : public testing::Test { const char *ip = "127.0.0.1"; int port = 9310; const char *confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; - int snapshotInterval = 3600; // 防止自动打快照 + int snapshotInterval = 3600; // Prevent automatic snapshot taking int electionTimeoutMs = 3000; pid_t pid1; @@ -167,7 +167,7 @@ TEST_F(BraftCliService2Test, basic2) { return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ WaitpidGuard waitpidGuard(pid1, pid2, pid3); ::usleep(1.2 * 1000 * electionTimeoutMs); @@ -182,7 +182,7 @@ TEST_F(BraftCliService2Test, basic2) { options.timeout_ms = 3000; options.max_retry = 3; - /* add peer - 非法copyset */ + /*Add peer - illegal copyset*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); @@ -190,7 +190,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); AddPeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -210,7 +210,7 @@ 
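
// Illustrative sketch (not VersionTool's implementation): recover the program
// name from a command-line string such as
//   "/usr/bin/nebd-server -confPath=/etc/nebd/nebd-server.conf ..."
// by taking the first whitespace-delimited token and stripping its directory
// prefix, which yields "nebd-server" for the example documented above.
#include <iostream>
#include <sstream>
#include <string>

std::string ProcessNameFromCmdline(const std::string& cmdline) {
    std::istringstream iss(cmdline);
    std::string exe;
    iss >> exe;  // first token: the executable path
    std::string::size_type pos = exe.find_last_of('/');
    return pos == std::string::npos ? exe : exe.substr(pos + 1);
}

int main() {
    std::cout << ProcessNameFromCmdline(
                     "/usr/bin/nebd-server -confPath=/etc/nebd/nebd-server.conf")
              << std::endl;  // prints "nebd-server"
    return 0;
}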
TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法peerid */ + /*Add peer - illegal peer id*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); @@ -223,7 +223,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_addpeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); AddPeerResponse2 response; @@ -237,13 +237,13 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /*Add peer - sent to peers who are not leaders*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 + // Find a peer that is not a leader and send the configuration change request to it for processing if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; @@ -274,7 +274,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法copyset */ + /*Remove peer - illegal copyset*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); @@ -282,7 +282,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); RemovePeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -302,7 +302,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法peer id */ + /*Remove peer - illegal peer id*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); @@ -315,7 +315,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_removepeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); RemovePeerResponse2 response; @@ -329,13 +329,13 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "remove peer: " << cntl.ErrorText(); } - /* remove peer - 发送给不是leader的peer */ + /*Remove peer - sent to peers who are not leaders*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 + // Find a peer that is not a leader and send the configuration change request to it for processing if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); @@ -367,7 +367,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法copyset */ + /* Transfer leader - illegal copyset*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); @@ -375,7 +375,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); TransferLeaderRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -417,7 +417,7 @@ TEST_F(BraftCliService2Test, basic2) { stub.TransferLeader(&cntl, &request, &response, NULL); 
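
// Compact summary of the error-code conventions these CLI-service tests
// assert (a standalone sketch, not the BraftCliService2 implementation):
// a copyset that does not exist yields ENOENT, a malformed peer id yields
// EINVAL, and sending a configuration-change request to a non-leader peer
// yields EPERM.
#include <cerrno>
#include <iostream>

enum class CliCheck { kOk, kCopysetNotFound, kBadPeerId, kNotLeader };

int CliErrorCode(CliCheck check) {
    switch (check) {
        case CliCheck::kCopysetNotFound: return ENOENT;  // illegal copyset
        case CliCheck::kBadPeerId:       return EINVAL;  // illegal peer id
        case CliCheck::kNotLeader:       return EPERM;   // request hit a follower
        case CliCheck::kOk:              return 0;
    }
    return 0;
}

int main() {
    std::cout << CliErrorCode(CliCheck::kCopysetNotFound) << " "
              << CliErrorCode(CliCheck::kBadPeerId) << " "
              << CliErrorCode(CliCheck::kNotLeader) << std::endl;
    return 0;
}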
ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法peer */ + /*Transfer leader - illegal peer*/ { Peer *leaderPeer = new Peer(); Peer *peer = new Peer(); @@ -430,7 +430,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_transferee(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); TransferLeaderResponse2 response; @@ -444,7 +444,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "Transfer leader peer: " << cntl.ErrorText(); } - /* get leader - 非法copyset */ + /*Get leader - illegal copyset*/ { PeerId leaderId = leaderId; brpc::Channel channel; @@ -455,7 +455,7 @@ TEST_F(BraftCliService2Test, basic2) { GetLeaderResponse2 response; brpc::Controller cntl; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); @@ -472,7 +472,7 @@ TEST_F(BraftCliService2Test, basic2) { Peer *leaderPeer2 = new Peer(); Peer *addPeer = new Peer(); PeerId removePeerId; - // 找一个不是leader的peer,作为remove peer + // Find a peer that is not a leader as a remove peer if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { removePeerId.parse(peer2.address()); @@ -529,14 +529,14 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl2.Failed()); ASSERT_EQ(0, cntl2.ErrorCode()); } - /* snapshot - 非法copyset */ + /*Snapshot - illegal copyset*/ { PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); SnapshotRequest2 request; - /* 非法 copyset */ + /*Illegal copyset*/ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); Peer *peerPtr = new Peer(); @@ -557,7 +557,7 @@ TEST_F(BraftCliService2Test, basic2) { } /* snapshot - normal */ { - // 初始状态快照不为空 + // The initial state snapshot is not empty std::string copysetDataDir = dirMap[gLeader.address()] + "/" + ToGroupId(logicPoolId, copysetId) + "/" + RAFT_LOG_DIR; std::shared_ptr fs( @@ -588,7 +588,7 @@ TEST_F(BraftCliService2Test, basic2) { stub.Snapshot(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " << cntl.ErrorText(); - // 需要连续打两次快照才能删除第一次快照时的log + // Two consecutive snapshots are required to delete the log from the first snapshot sleep(5); cntl.Reset(); LOG(INFO) << "Start do snapshot"; @@ -598,7 +598,7 @@ TEST_F(BraftCliService2Test, basic2) { for (int i = 0; i < 60; ++i) { files.clear(); fs->List(copysetDataDir.c_str(), &files); - // 打完快照应该只剩下meta信息 + //After taking the snapshot, only meta information should be left if (files.size() == 1) { break; } @@ -622,7 +622,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl.Failed()) << "Do snapshot all fail, error: " << cntl.ErrorText(); } - /* reset peer - 非法 copyset */ + /*Reset peer - illegal copyset*/ { Peer *targetPeer = new Peer(); *targetPeer = peer1; @@ -630,7 +630,7 @@ TEST_F(BraftCliService2Test, basic2) { brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); ResetPeerRequest2 request; - /* 非法 copyset */ + /*Illegal copyset*/ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_requestpeer(targetPeer); @@ -646,7 +646,7 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* reset peer - new peer为空 */ + /*Reset peer - new peer is empty*/ { Peer *targetPeer = new Peer(); *targetPeer = peer1; diff --git 
a/test/chunkserver/braft_cli_service_test.cpp b/test/chunkserver/braft_cli_service_test.cpp index 50f04588af..721b8c04e8 100644 --- a/test/chunkserver/braft_cli_service_test.cpp +++ b/test/chunkserver/braft_cli_service_test.cpp @@ -127,7 +127,7 @@ TEST_F(BraftCliServiceTest, basic) { return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -166,7 +166,7 @@ TEST_F(BraftCliServiceTest, basic) { options.timeout_ms = 1500; options.max_retry = 3; - /* add peer - 非法 copyset */ + /* Add peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -188,7 +188,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法 peerid */ + /* add peer - illegal peerid */ { PeerId leaderId = leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leaderId); @@ -210,7 +210,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* add peer - sent to peers who are not leader */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); @@ -240,13 +240,13 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法 copyset */ + /* remove peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); RemovePeerRequest request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_leader_id(leaderId.to_string()); @@ -261,7 +261,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法 peer id */ + /* remove peer - illegal peer id */ { PeerId leaderId = leader; brpc::Channel channel; @@ -281,7 +281,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* remove peer - 发送给不是 leader 的 peer */ + /* remove peer - sent to peers who are not leaders */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); @@ -309,7 +309,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法 copyset */ + /* transfer leader - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -346,7 +346,7 @@ TEST_F(BraftCliServiceTest, basic) { stub.transfer_leader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法 peer */ + /* transfer leader - illegal peer */ { PeerId leaderId = leader; brpc::Channel channel; @@ -365,7 +365,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* get leader - 非法 copyset */ + /* get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 3968766d91..b28a6f2d3e 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ b/test/chunkserver/chunk_service_test.cpp @@ -135,7 +135,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -313,7 +313,7 @@ TEST_F(ChunkserverTest, 
normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -329,7 +329,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response.status()); } - /* Read 一个不存在的 Chunk */ + /*Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -347,7 +347,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response.status()); } - /* Applied index Read 一个不存在的 Chunk */ + /*Applied index Read a non-existent Chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -467,7 +467,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(0, channel.Init(leader.addr, NULL)); ChunkService_Stub stub(&channel); - // get hash : 访问不存在的chunk + // Get hash: Access non-existent chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -485,7 +485,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_STREQ("0", response.hash().c_str()); } - // get hash : 非法的offset和length + // Get hash: illegal offset and length { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -560,7 +560,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(1, response.chunksn().size()); } - // get hash : 访问存在的chunk + // Get hash: Access existing chunks { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -579,7 +579,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } - /* 多 chunk read/write/delete */ + /*Multi chunk read/write/delete*/ { brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { @@ -685,7 +685,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /*Delete a non-existent chunk (duplicate deletion)*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -703,7 +703,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { } } } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Channel channel; uint32_t requestSize = kOpRequestAlignSize; @@ -770,7 +770,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* read 一个不存在的 chunk */ + /*Read a non-existent chunk*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); diff --git a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp index 674220d91a..c9b0c1f5e2 100644 --- a/test/chunkserver/chunk_service_test2.cpp +++ b/test/chunkserver/chunk_service_test2.cpp @@ -134,7 +134,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -177,13 +177,13 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); - /* 非法参数 request 测试 */ + /*Illegal parameter request test*/ brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to " << leader; } ChunkService_Stub stub(&channel); - /* read 溢出 */ + /*Read overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -201,7 +201,7 @@ TEST_F(ChunkService2Test, 
illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read offset没对齐 */ + /*Read offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -219,7 +219,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read size没对齐 */ + /*Read size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -237,7 +237,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read copyset 不存在 */ + /*Read copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -256,7 +256,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* read snapshot 溢出 */ + /*Read snapshot overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -274,7 +274,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot offset没对齐 */ + /*Read snapshot offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -293,7 +293,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot size没对齐 */ + /*Read snapshot size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -312,7 +312,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot copyset 不存在 */ + /*Read snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -331,7 +331,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* write 溢出 */ + /*Write overflow*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -350,7 +350,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write offset没对齐 */ + /*Write offset not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -369,7 +369,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write size没对齐 */ + /*Write size not aligned*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -388,7 +388,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write copyset 不存在 */ + /*The write copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -407,7 +407,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete copyset 不存在*/ + /*Delete copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -423,7 +423,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete snapshot copyset 不存在*/ + /*Delete snapshot copyset does not exist*/ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ 
-456,7 +456,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* 不是 leader */ + /*Not a leader*/ { PeerId peer1; PeerId peer2; @@ -568,7 +568,7 @@ class ChunkServiceTestClosure : public ::google::protobuf::Closure { void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + //Sleep test, overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -586,7 +586,7 @@ class UpdateEpochTestClosure : public ::google::protobuf::Closure { void Run() override { if (0 != sleep_) { - // 睡眠一会方面测试,overload + //Sleep test, overload ::usleep(sleep_); LOG(INFO) << "return rpc"; } @@ -780,17 +780,17 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { }; std::vector threads; - // 启动10个线程,将chunkserver压满 + // Start 10 threads to fully load the chunkserver for (int i = 0; i < 10; ++i) { std::thread t1(writeFunc); threads.push_back(std::move(t1)); } - // 等待进程启动起来 + // Waiting for the process to start ::usleep(500 * 1000); ASSERT_FALSE(inflightThrottle->IsOverLoad()); - // 压满之后chunkserver后面收到的request都会被拒绝 + // All requests received after the chunkserver is filled will be rejected // write chunk { brpc::Controller cntl; @@ -916,7 +916,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); } - // 等待request处理完成,之后chunkserver又重新可以接收新的request + // Wait for the request processing to complete, and then chunkserver can receive new requests again for (auto it = threads.begin(); it != threads.end(); ++it) { it->join(); } diff --git a/test/chunkserver/chunkserver_helper_test.cpp b/test/chunkserver/chunkserver_helper_test.cpp index e9d538bf0c..c43ba46d6f 100644 --- a/test/chunkserver/chunkserver_helper_test.cpp +++ b/test/chunkserver/chunkserver_helper_test.cpp @@ -27,7 +27,7 @@ namespace curve { namespace chunkserver { TEST(ChunkServerMeta, test_encode_and_decode) { - // 1. 正常编解码 + // 1. Normal encoding and decoding ChunkServerMetadata metadata; metadata.set_version(CURRENT_METADATA_VERSION); metadata.set_id(1); @@ -43,13 +43,13 @@ TEST(ChunkServerMeta, test_encode_and_decode) { ASSERT_EQ(metadata.id(), metaOut.id()); ASSERT_EQ(metadata.token(), metaOut.token()); - // 2. 编码异常 + // 2. Encoding anomaly metadata.clear_token(); strOut.clear(); ASSERT_FALSE( ChunkServerMetaHelper::EncodeChunkServerMeta(metadata, &strOut)); - // 3. 解码异常 + // 3. Decoding exception metadata.set_token("hello"); metadata.set_checksum(9999); ASSERT_TRUE( diff --git a/test/chunkserver/chunkserver_service_test.cpp b/test/chunkserver/chunkserver_service_test.cpp index 106501e267..14bf6e8bce 100644 --- a/test/chunkserver/chunkserver_service_test.cpp +++ b/test/chunkserver/chunkserver_service_test.cpp @@ -36,7 +36,7 @@ using ::testing::Return; using ::testing::_; TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { - // 启动ChunkServerService + // Start ChunkServerService auto server = new brpc::Server(); MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager(); ChunkServerServiceImpl* chunkserverService = @@ -53,7 +53,7 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ChunkServerStatusRequest request; ChunkServerStatusResponse response; - // 1. 指定chunkserver加载copyset完成 + // 1. Specify chunkserver to load copyset complete { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false)); @@ -63,7 +63,7 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ASSERT_FALSE(response.copysetloadfin()); } - // 2. 
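
// Minimal sketch of the inflight-throttle behavior exercised by the overload
// test above (the real InflightThrottle lives in the chunkserver sources;
// the names and limit here are illustrative): each request increments a
// counter on entry and decrements it on completion, and once the counter
// reaches the configured maximum any new request is rejected as overloaded
// until older requests drain.
#include <atomic>
#include <cstdint>
#include <iostream>

class InflightCounterSketch {
 public:
    explicit InflightCounterSketch(uint64_t maxInflight)
        : maxInflight_(maxInflight), inflight_(0) {}

    // Called when a request arrives; false means "reject as overloaded".
    bool TryEnter() {
        if (inflight_.fetch_add(1) >= maxInflight_) {
            inflight_.fetch_sub(1);  // roll back, the slot was not available
            return false;
        }
        return true;
    }

    // Called when a request completes, freeing a slot for new requests.
    void Exit() { inflight_.fetch_sub(1); }

    bool IsOverLoad() const { return inflight_.load() >= maxInflight_; }

 private:
    const uint64_t maxInflight_;
    std::atomic<uint64_t> inflight_;
};

int main() {
    InflightCounterSketch throttle(2);
    std::cout << throttle.TryEnter() << throttle.TryEnter()  // 1 1: accepted
              << throttle.TryEnter() << std::endl;           // 0: overloaded
    throttle.Exit();                                         // one request done
    std::cout << throttle.TryEnter() << std::endl;           // 1: accepted again
    return 0;
}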
指定chunkserver加载copyset未完成 + // 2. The specified chunkserver loading copyset did not complete { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(true)); @@ -73,13 +73,13 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ASSERT_TRUE(response.copysetloadfin()); } - // 停止chunkserver service + // Stop chunkserver service server->Stop(0); server->Join(); delete server; server = nullptr; - // 3. 未获取到指定chunkserver加载copyset状态 + // 3. Unable to obtain the specified chunkserver loading copyset status { brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); diff --git a/test/chunkserver/chunkserver_snapshot_test.cpp b/test/chunkserver/chunkserver_snapshot_test.cpp index b534ca2ee3..dec864765b 100644 --- a/test/chunkserver/chunkserver_snapshot_test.cpp +++ b/test/chunkserver/chunkserver_snapshot_test.cpp @@ -77,18 +77,18 @@ class ChunkServerSnapshotTest : public testing::Test { }; /** - * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下 + * TODO(wudemiao) will further abstract I/O and verification in the later stage */ /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId, @@ -152,14 +152,14 @@ static void WriteThenReadVerify(PeerId leaderId, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId, @@ -198,14 +198,14 @@ static void ReadVerify(PeerId leaderId, } /** - * 异常 I/O 验证,验证集群是否处于不可用状态 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Abnormal I/O verification to verify if the cluster is in an unavailable state + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ static void ReadVerifyNotAvailable(PeerId leaderId, LogicPoolID logicPoolId, @@ -242,11 +242,11 @@ static void ReadVerifyNotAvailable(PeerId leaderId, } /** - * 验证copyset status是否符合预期 + * Verify if the copyset status meets expectations * @param peerId: peer id - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id - * @param expectResp: 期待的copyset status + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID + * @param 
expectResp: Expected copyset status */ static void CopysetStatusVerify(PeerId peerId, LogicPoolID logicPoolID, @@ -279,10 +279,10 @@ static void CopysetStatusVerify(PeerId peerId, } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: Peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ static void CopysetStatusVerify(const std::vector &peerIds, LogicPoolID logicPoolID, @@ -309,7 +309,7 @@ static void CopysetStatusVerify(const std::vector &peerIds, ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); @@ -333,9 +333,9 @@ static void CopysetStatusVerify(const std::vector &peerIds, butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组是否能够正常提供服务 - * 1. 创建一个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether the replication group of one node can provide services normally + * 1. Create a replication group for a replica + * 2. Wait for the leader to generate, write the data, and then read it out for verification */ TEST_F(ChunkServerSnapshotTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -364,7 +364,7 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { loop); CopysetStatusResponse expectResp; - // read、write、1次配置变更 + // read, write, 1 configuration change int64_t commitedIndex = loop + 1; expectResp.set_status(COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); expectResp.set_state(braft::STATE_LEADER); @@ -390,12 +390,12 @@ TEST_F(ChunkServerSnapshotTest, OneNode) { } /** - * 验证1个节点的关闭 leader 后重启是否能够正常服务 - * 1. 创建1个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader and restart of one node can provide normal service + * 1. Create a replication group for 1 replica + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { LogicPoolID logicPoolId = 2; @@ -424,7 +424,7 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { loop); ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); - // 测试发现集群不可用 + // Testing found that the cluster is not available ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, @@ -473,9 +473,9 @@ TEST_F(ChunkServerSnapshotTest, OneNodeShutdown) { } /** - * 验证2个节点是否能够正常提供服务 - * 1. 创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether two nodes can provide services normally + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification */ TEST_F(ChunkServerSnapshotTest, TwoNodes) { LogicPoolID logicPoolId = 2; @@ -511,12 +511,12 @@ TEST_F(ChunkServerSnapshotTest, TwoNodes) { } /** - * 验证2个节点的关闭非 leader 节点 后重启是否能够正常服务 - * 1. 创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 
再 write 数据,再 read 出来验证一遍 + * Verify whether restarting two nodes after closing non leader nodes can provide normal service + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown is not a leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { LogicPoolID logicPoolId = 2; @@ -539,7 +539,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write + // Initiate read/write WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -548,7 +548,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; if (0 == ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { @@ -563,7 +563,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { ::usleep(2000 * electionTimeoutMs); - // 测试发现集群不可用 + // Testing found that the cluster is not available ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, @@ -575,9 +575,9 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -591,12 +591,12 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownOnePeer) { } /** - * 验证2个节点的关闭 leader 后重启是否能够正常服务 - * 1. 创建2个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader and restart of two nodes can provide normal service + * 1. Create a replication group of 2 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { LogicPoolID logicPoolId = 2; @@ -619,7 +619,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write + // Initiate read/write WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -630,7 +630,7 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { // shutdown leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderId)); - // 测试发现集群不可用 + // Testing found that the cluster is not available ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, @@ -642,9 +642,9 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { ASSERT_EQ(0, cluster.StartPeer(leaderId)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -658,9 +658,9 @@ TEST_F(ChunkServerSnapshotTest, TwoNodesShutdownLeader) { } /** - * 验证3个节点是否能够正常提供服务 - * 1. 创建3个副本的复制组 - * 2. 
等待 leader 产生,write 数据,然后 read 出来验证一遍 + * Verify whether the three nodes can provide services normally + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodes) { LogicPoolID logicPoolId = 2; @@ -685,7 +685,7 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodes) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -699,12 +699,12 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodes) { } /** - * 验证3个节点的关闭非 leader 节点 后重启是否能够正常服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether restarting after closing non leader nodes on three nodes can provide normal service + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown is not a leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written before the read + * 5. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { LogicPoolID logicPoolId = 2; @@ -729,7 +729,7 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write + // Initiate read/write WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -738,7 +738,7 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; if (0 == ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { @@ -750,9 +750,9 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -766,12 +766,12 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownOnePeer) { } /** - * 验证3个节点的关闭 leader 节点 后重启是否能够正常服务 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown leader,然后再拉起来 - * 4. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 5. 再 write 数据,再 read 出来验证一遍 + * Verify whether the shutdown of the leader node and restart of three nodes can provide normal service + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown leader, then pull it up again + * 4. Wait for the leader to be generated, and then verify the data written before the read + * 5. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { LogicPoolID logicPoolId = 2; @@ -796,7 +796,7 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write + // Initiate read/write WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -808,7 +808,7 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { // shutdown leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderId)); - // 测试发现集群暂时不可用 + // Testing found that the cluster is temporarily unavailable ReadVerifyNotAvailable(leaderId, logicPoolId, copysetId, @@ -820,13 +820,13 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { ASSERT_EQ(0, cluster.StartPeer(leaderId)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -840,18 +840,18 @@ TEST_F(ChunkServerSnapshotTest, ThreeNodesShutdownLeader) { } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown non leader + * 4. Then sleep exceeds one snapshot interval and write read data + * 5. Then sleep for more than one snapshot interval and write read data; 4,5 two-step + * It is to ensure that at least two snapshots are taken, so that when the node restarts again, it must pass the install snapshot, + * Because the log has been deleted + * 6. Wait for the leader to be generated, and then verify the data written before the read + * 7. transfer leader to shut down peer + * 8. Verification of data written before read + * 9. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -877,7 +877,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write + // Initiate read/write WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -886,7 +886,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; if (0 == ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { @@ -900,9 +900,9 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -911,7 +911,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ch + 1, loop); ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -920,13 +920,13 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ch + 2, loop); - // restart, 需要从 install snapshot 恢复 + // Restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 2, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -961,9 +961,9 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -977,21 +977,21 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 删除 shutdown peer 的数据目录,然后再拉起来 - * 10. 然后 read 之前写入的数据验证一遍 - * 11. transfer leader 到shut down 的 peer 上 - * 12. 在 read 之前写入的数据验证 - * 13. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown non leader + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. 
Then sleep for more than one snapshot interval and write read data; 4,5 two-step + * It is to ensure that at least two snapshots are taken, so that when the node restarts again, it must pass the install snapshot, + * Because the log has been deleted + * 9. Delete the data directory of the shutdown peer and then pull it up again + * 10. Then verify the data written before read + * 11. Transfer leader to shut down peer + * 12. Verification of data written before read + * 13. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { LogicPoolID logicPoolId = 2; @@ -1017,7 +1017,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write + // Initiate read/write WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -1026,7 +1026,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ch, loop); - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; if (0 == ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { @@ -1040,9 +1040,9 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -1051,9 +1051,9 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ch + 1, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -1062,7 +1062,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ch + 2, loop); ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -1071,7 +1071,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ch + 3, loop); - // 删除此 peer 的数据,然后重启 + // Delete the data for this peer and restart it ASSERT_EQ(0, ::system(TestCluster::RemoveCopysetDirCmd(shutdownPeerid) .c_str())); //NOLINT @@ -1087,7 +1087,7 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); // Wait shutdown peer recovery, and then transfer leader to it @@ -1116,9 +1116,9 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), shutdownPeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId, length, ch + 3, loop); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -1132,22 +1132,22 @@ TEST_F(ChunkServerSnapshotTest, ShutdownOnePeerAndRemoveData) { } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 
然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更 add peer - * 10. 然后 read 之前写入的数据验证一遍 - * 11. 在发起 write,再 read 读出来验证一遍 - * 12. transfer leader 到 add 的 peer 上 - * 13. 在 read 之前写入的数据验证 - * 14. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown of non leader nodes on three nodes, restart, and control the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown non leader + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 two-step + * It is to ensure that at least two snapshots are taken, so that when the node restarts again, it must pass the install snapshot, + * Because the log has been deleted + * 9. Add peer through configuration changes + * 10. Then verify the data written before read + * 11. Initiate write and read again to verify + * 12. Transfer leader to add's peer + * 13. Verification of data written before read + * 14. Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1173,7 +1173,7 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write, multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1184,7 +1184,7 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { loop); } - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId shutdownPeerid; if (0 == ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { @@ -1198,11 +1198,11 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1213,9 +1213,9 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { loop); } - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1226,7 +1226,7 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { loop); } ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1237,7 +1237,7 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { loop); } - // add 一个 peer + // Add a peer { ASSERT_EQ(0, cluster.StartPeer(peer4, true)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); @@ -1253,11 +1253,11 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; 
i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1294,11 +1294,11 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), peer4.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 4, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1321,20 +1321,20 @@ TEST_F(ChunkServerSnapshotTest, AddPeerAndRecoverFromInstallSnapshot) { } /** - * * 验证3个节点的 remove 一个节点,然后再 add 回来,并控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. 通过配置变更 remove 一个 非 leader - * 4. read 之前 write 的数据验证一遍 - * 5. 再 write 数据,然后 read 出来验证一遍 - * 6. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 7. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了 - * 9. 通过配置变更再将之前 remove 的 peer add 回来 - * 10. transfer leader 到此 peer - * 11. 在 read 之前写入的数据验证 - * 12. 再 write 数据,再 read 出来验证一遍 + * Verify the removal of one node from three nodes, then add it back and control it to recover from the install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Remove a non leader through configuration changes + * 4. Verify the data written before read + * 5. Write the data again, and then read it out for verification + * 6. Then sleep exceeds one snapshot interval and write read data + * 7. Then sleep for more than one snapshot interval and write read data; 4,5 two-step + * It is to ensure that at least two snapshots are taken, so that when the node restarts again, it must pass the install snapshot, + * Because the log has been deleted + * 9. Add the previously removed peer back through configuration changes + * 10. Transfer leader to this peer + * 11. Verification of data written before read + * 12. 
Write the data again and read it out for verification */ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -1360,7 +1360,7 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 发起 read/write,多个 chunk file + // Initiate read/write, multiple chunk files for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1371,7 +1371,7 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { loop); } - // shutdown 某个非 leader 的 peer + // Shutdown a non leader peer PeerId removePeerid; if (0 == ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { @@ -1383,7 +1383,7 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderId.to_string(); ASSERT_NE(0, ::strcmp(removePeerid.to_string().c_str(), leaderId.to_string().c_str())); - // remove 一个 peer + // Remove a peer { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; @@ -1397,11 +1397,11 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { ASSERT_EQ(0, status.error_code()); } - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1412,9 +1412,9 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { loop); } - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1425,7 +1425,7 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { loop); } ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, @@ -1436,7 +1436,7 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { loop); } - // add 回来 + // Add, come back { Configuration conf = cluster.CopysetConf(); braft::cli::CliOptions options; @@ -1476,11 +1476,11 @@ TEST_F(ChunkServerSnapshotTest, RemovePeerAndRecoverFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderId.to_string().c_str(), removePeerid.to_string().c_str())); - // 读出来验证一遍 + // Read it out and verify it again for (int i = 0; i < kMaxChunkId; ++i) { ReadVerify(leaderId, logicPoolId, copysetId, i, length, ch + 3, loop); } - // 再次发起 read/write + // Initiate read/write again for (int i = 0; i < kMaxChunkId; ++i) { WriteThenReadVerify(leaderId, logicPoolId, diff --git a/test/chunkserver/chunkserver_test_util.cpp b/test/chunkserver/chunkserver_test_util.cpp index cb2d020048..a5dfc7e0b2 100644 --- a/test/chunkserver/chunkserver_test_util.cpp +++ b/test/chunkserver/chunkserver_test_util.cpp @@ -87,7 +87,7 @@ std::shared_ptr InitFilePool(std::shared_ptr fsptr, delete[] data; } /** - * 持久化FilePool meta file + * Persisting FilePool meta file */ FilePoolMeta meta; @@ -225,7 +225,7 @@ butil::Status WaitLeader(const LogicPoolID &logicPoolId, status = GetLeader(logicPoolId, copysetId, conf, leaderId); if (status.ok()) { /** - * 等待 flush noop entry + * Waiting for flush noop entry */ ::usleep(electionTimeoutMs * 1000); 
return status; @@ -299,7 +299,7 @@ int TestCluster::StartPeer(const PeerId &peerId, LOG(ERROR) << "start peer fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ + /* Start a ChunkServer in the child process */ StartPeerNode(peer->options, peer->conf, getChunkFromPool, createChunkFilePool); exit(0); @@ -384,7 +384,7 @@ int TestCluster::ContPeer(const PeerId &peerId) { int TestCluster::WaitLeader(PeerId *leaderId) { butil::Status status; /** - * 等待选举结束 + * Wait for the election to finish */ ::usleep(2 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -393,8 +393,8 @@ int TestCluster::WaitLeader(PeerId *leaderId) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderId); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * After the election the noop entry still has to be committed and applied before the node can serve, + * so wait for the noop apply here. This wait may be too short and prone to failure; improve it later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " @@ -441,7 +441,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, bool enableGetchunkFromPool, bool createChunkFilePool) { /** - * 用于注释,说明 cmd format + * For documentation only: describes the cmd format */ std::string cmdFormat = R"( ./bazel-bin/test/chunkserver/server-test @@ -466,7 +466,7 @@ int TestCluster::StartPeerNode(CopysetNodeOptions options, confStr += it->to_string(); confStr += ","; } - // 去掉最后的逗号 + // Remove the trailing comma confStr.pop_back(); std::string cmd_dir("./bazel-bin/test/chunkserver/server-test"); diff --git a/test/chunkserver/chunkserver_test_util.h b/test/chunkserver/chunkserver_test_util.h index b329e069cd..f89070936d 100644 --- a/test/chunkserver/chunkserver_test_util.h +++ b/test/chunkserver/chunkserver_test_util.h @@ -45,13 +45,13 @@ using curve::fs::LocalFileSystem; std::string Exec(const char *cmd); /** - * 当前FilePool需要事先格式化,才能使用,此函数用于事先格式化FilePool - * @param fsptr:本文文件系统指针 - * @param chunkfileSize:chunk文件的大小 - * @param metaPageSize:chunk文件的meta page大小 - * @param poolpath:文件池的路径,例如./chunkfilepool/ - * @param metaPath:meta文件路径,例如./chunkfilepool/chunkfilepool.meta - * @return 初始化成功返回FilePool指针,否则返回null + * The FilePool must be formatted before it can be used; this function formats it in advance + * @param fsptr: local filesystem pointer + * @param chunkfileSize: size of a chunk file + * @param metaPageSize: meta page size of a chunk file + * @param poolpath: path of the file pool, for example ./chunkfilepool/ + * @param metaPath: meta file path, for example ./chunkfilepool/chunkfilepool.meta + * @return the FilePool pointer on successful initialization, otherwise null */ std::shared_ptr InitFilePool(std::shared_ptr fsptr, //NOLINT int chunkfileCount, @@ -74,36 +74,36 @@ butil::Status WaitLeader(const LogicPoolID &logicPoolId, int electionTimeoutMs); /** - * PeerNode 状态 - * 1. exit:未启动,或者被关闭 - * 2. running:正在运行 - * 3. stop:hang 住了 + * PeerNode state + * 1. exit: not started, or has been shut down + * 2. running: running + * 3. stop: hung */ enum class PeerNodeState { - EXIT = 0, // 退出 - RUNNING = 1, // 正在运行 - STOP = 2, // hang住 + EXIT = 0, // Exited + RUNNING = 1, // Running + STOP = 2, // Hung }; /** - * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本 + * A ChunkServer process that holds one replica of a copyset */ struct PeerNode { PeerNode() : pid(0), options(), state(PeerNodeState::EXIT) {} - // Peer对应的进程id + // Process ID of this peer pid_t pid; - // Peer的地址 + // Address of this peer PeerId peerId; - // copyset的集群配置 + // Cluster configuration of the copyset Configuration conf; - // copyset的基本配置 + // Basic options of the copyset CopysetNodeOptions options; - // PeerNode的状态 + // State of the PeerNode PeerNodeState state; }; /** - * 封装模拟 cluster 测试相关的接口 + * Wraps the interfaces used to simulate a cluster in tests */ class TestCluster { public: @@ -115,55 +115,55 @@ class TestCluster { public: /** - * 启动一个 Peer + * Start a peer * @param peerId - * @param empty 初始化配置是否为空 - * @param: get_chunk_from_pool是否从FilePool获取chunk - * @param: createFilePool是否创建FilePool,重启的情况下不需要 - * @return 0:成功,-1 失败 + * @param empty whether the initial configuration is empty + * @param: get_chunk_from_pool whether to get chunks from the FilePool + * @param: createFilePool whether to create the FilePool; not needed when restarting + * @return 0 on success, -1 on failure */ int StartPeer(const PeerId &peerId, const bool empty = false, bool getChunkFrom_pool = false, bool createFilePool = true); /** - * 关闭一个 peer,使用 SIGINT + * Shut down a peer with SIGINT * @param peerId - * @return 0:成功,-1 失败 + * @return 0 on success, -1 on failure */ int ShutdownPeer(const PeerId &peerId); /** - * hang 住一个 peer,使用 SIGSTOP + * Hang a peer with SIGSTOP * @param peerId - * @return 0:成功,-1 失败 + * @return 0 on success, -1 on failure */ int StopPeer(const PeerId &peerId); /** - * 恢复 hang 住的 peer,使用 SIGCONT + * Resume a hung peer with SIGCONT * @param peerId - * @return 0:成功,-1 失败 + * @return 0 on success, -1 on failure */ int ContPeer(const PeerId &peerId); /** - * 反复重试直到等到新的 leader 产生 - * @param leaderId 出参,返回 leader id - * @return 0:成功,-1 失败 + * Retry repeatedly until a new leader is elected + * @param leaderId output parameter, returns the leader id + * @return 0 on success, -1 on failure */ int WaitLeader(PeerId *leaderId); /** - * Stop 所有的 peer - * @return 0:成功,-1 失败 + * Stop all peers + * @return 0 on success, -1 on failure */ int StopAllPeers(); public: - /* 返回集群当前的配置 */ + /* Return the current configuration of the cluster */ const Configuration CopysetConf() const; - /* 修改 PeerNode 配置相关的接口,单位: s */ + /* Interfaces for modifying the PeerNode configuration, unit: s */ int SetsnapshotIntervalS(int snapshotIntervalS); int SetElectionTimeoutMs(int electionTimeoutMs); int SetCatchupMargin(int catchupMargin); @@ -175,11 +175,11 @@ class TestCluster { public: /** - * 返回执行 peer 的 copyset 路径 with protocol, ex: local://./127.0.0.1:9101:0 + * Return the copyset path of the running peer, with protocol, ex: local://./127.0.0.1:9101:0 */ static const std::string CopysetDirWithProtocol(const PeerId &peerId); /** - * 返回执行 peer 的 copyset 路径 without protocol, ex: ./127.0.0.1:9101:0 + * Return the copyset path of the running peer, without protocol, ex: ./127.0.0.1:9101:0 */ static const std::string CopysetDirWithoutProtocol(const PeerId &peerId); /** @@ -188,25 +188,25 @@ class TestCluster { static const std::string RemoveCopysetDirCmd(const PeerId &peerid); private: - // 集群名字 + // Cluster name std::string clusterName_; - // 集群的peer集合 + // Set of peers in the cluster std::set peers_; - //
peer集合的映射map + // Mapping Map of Peer Set std::unordered_map> peersMap_; - // 快照间隔 + // Snapshot interval int snapshotIntervalS_; - // 选举超时时间 + // Election timeout int electionTimeoutMs_; - // catchup margin配置 + // Catchup margin configuration int catchupMargin_; - // 集群成员配置 + // Cluster member configuration Configuration conf_; - // 逻辑池id + // Logical Pool ID static LogicPoolID logicPoolID_; - // 复制组id + // Copy Group ID static CopysetID copysetID_; }; diff --git a/test/chunkserver/cli2_test.cpp b/test/chunkserver/cli2_test.cpp index d4d482d118..66bc1d8cbf 100644 --- a/test/chunkserver/cli2_test.cpp +++ b/test/chunkserver/cli2_test.cpp @@ -90,7 +90,7 @@ TEST_F(Cli2Test, basic) { int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment is prone to timeout */ int electionTimeoutMs = 3000; @@ -157,7 +157,7 @@ TEST_F(Cli2Test, basic) { return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3, pid_t pid4) { @@ -197,11 +197,11 @@ TEST_F(Cli2Test, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /*Waiting for transfer leader to succeed*/ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * The configuration change requires a longer timeout due to the completion of copying a log entry + * Time to avoid occasional timeout errors in the CI environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; @@ -219,10 +219,10 @@ TEST_F(Cli2Test, basic) { LOG(INFO) << "remove peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* It is possible to remove a leader. If a leader is being removed, it is necessary to wait until a new leader is generated, + * Otherwise, the add peer test below will fail and wait for a long time to ensure removal + * After the successful election of the new leader, switch to the flush configuration of the become leader + * Complete*/ ::usleep(1.5 * 1000 * electionTimeoutMs); butil::Status status = WaitLeader(logicPoolId, copysetId, @@ -246,7 +246,7 @@ TEST_F(Cli2Test, basic) { << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /*Repeatedly add the same peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0"); @@ -281,13 +281,13 @@ TEST_F(Cli2Test, basic) { LOG(INFO) << "transfer leader: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* The transfer leader only sends rpc to the leader and does not wait for the leader to transfer + * We only return after success, so we need to wait here. 
Also, we cannot query the leader right away, because + * right after the transfer the previous leader may still be returned. Moreover, once transfer + * leader has succeeded, the leader is already queryable while "become leader" is still in progress, but + * becoming leader flushes the current conf to serve as the noop entry. If the next transfer + * leader is issued immediately at that point it will be rejected, because only one configuration + * change can be in progress at a time */ ::usleep(waitTransferLeader); butil::Status status = WaitLeader(logicPoolId, copysetId, @@ -340,7 +340,7 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.address().c_str(), leader.to_string().c_str()); } - /* transfer 给 leader 给 leader,仍然返回成功 */ + /* Transfer leader to the current leader still returns success */ { LOG(INFO) << "start transfer leader"; butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, @@ -378,7 +378,7 @@ TEST_F(Cli2Test, basic) { } /* reset peer */ { - // 等待change peer完成,否则用例会失败 + // Wait for the change peer to complete, otherwise the test case will fail sleep(3); Peer peer; peer.set_address("127.0.0.1:9033:0"); @@ -392,7 +392,7 @@ TEST_F(Cli2Test, basic) { ASSERT_TRUE(status.ok()); } - /* 异常分支测试 */ + /* Error branch tests */ /* get leader - conf empty */ { Configuration conf; @@ -401,7 +401,7 @@ TEST_F(Cli2Test, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /* get leader - illegal address */ { Configuration conf; Peer leader; @@ -410,11 +410,11 @@ TEST_F(Cli2Test, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /* add peer - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); - /* 添加一个根本不存在的节点 */ + /* Add a node that does not exist at all */ Peer peer; peer.set_address("127.0.0.1:9039:2"); butil::Status status = curve::chunkserver::AddPeer(logicPoolId, @@ -426,7 +426,7 @@ TEST_F(Cli2Test, basic) { LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /* transfer leader - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9035:2"); @@ -444,7 +444,7 @@ TEST_F(Cli2Test, basic) { << status.error_str(); } } - /* change peers - 不存在的 peer */ + /* change peers - non-existent peer */ { Configuration conf; conf.parse_from("127.0.0.1:9033:0,127.0.0.1:9034:0,127.0.0.1:9036:0"); @@ -459,7 +459,7 @@ TEST_F(Cli2Test, basic) { LOG(INFO) << "change peers: " << status.error_code() << ", " << status.error_str(); } - /* reset peer - newConf为空 */ + /* reset peer - newConf is empty */ { Configuration conf; Peer peer; @@ -474,7 +474,7 @@ TEST_F(Cli2Test, basic) { << status.error_str(); ASSERT_EQ(EINVAL, status.error_code()); } - /* reset peer peer地址非法 */ + /* reset peer - illegal peer address */ { Peer peer; peer.set_address("127.0.0.1:65540:0"); @@ -487,7 +487,7 @@ TEST_F(Cli2Test, basic) { << status.error_code() << ", " << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* reset peer peer地址不存在 */ + /* reset peer - peer address does not exist */ { Peer peer; peer.set_address("127.0.0.1:9040:0"); @@ -500,7 +500,7 @@ TEST_F(Cli2Test, basic) { << status.error_code() << ", " << status.error_str(); ASSERT_EQ(EHOSTDOWN, status.error_code()); } - /* snapshot peer地址非法 */ + /* snapshot - illegal peer address */ { Peer peer; peer.set_address("127.0.0.1:65540:0"); @@ -512,7 +512,7 @@ TEST_F(Cli2Test, basic)
{ << status.error_code() << ", " << status.error_str(); ASSERT_EQ(-1, status.error_code()); } - /* snapshot peer地址不存在 */ + /*The snapshot peer address does not exist*/ { Peer peer; peer.set_address("127.0.0.1:9040:0"); diff --git a/test/chunkserver/cli_test.cpp b/test/chunkserver/cli_test.cpp index 111ec23773..5ab14ca0cc 100644 --- a/test/chunkserver/cli_test.cpp +++ b/test/chunkserver/cli_test.cpp @@ -84,7 +84,7 @@ TEST_F(CliTest, basic) { int snapshotInterval = 600; /** - * 设置更大的默认选举超时时间,因为当前 ci 环境很容易出现超时 + * Set a larger default election timeout because the current CI environment is prone to timeout */ int electionTimeoutMs = 3000; @@ -136,7 +136,7 @@ TEST_F(CliTest, basic) { return; } - /* 保证进程一定会退出 */ + /*Ensure that the process will definitely exit*/ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -172,11 +172,11 @@ TEST_F(CliTest, basic) { WaitLeader(logicPoolId, copysetId, conf, &leader, electionTimeoutMs); ASSERT_TRUE(status.ok()); - /* 等待 transfer leader 成功 */ + /* Waiting for transfer leader to succeed*/ int waitTransferLeader = 3000 * 1000; /** - * 配置变更因为设置一条 log entry 的完成复制,所以设置较长的 timeout - * 时间,以免在 ci 环境偶尔会出现超时出错 + * The configuration change requires a longer timeout due to the completion of copying a log entry + * Time to avoid occasional timeout errors in the CI environment */ braft::cli::CliOptions opt; opt.timeout_ms = 6000; @@ -193,10 +193,10 @@ TEST_F(CliTest, basic) { LOG(INFO) << "remove peer: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); - /* 可能移除的是 leader,如果移除的是 leader,那么需要等到新的 leader 产生, - * 否则下面的 add peer 测试就会失败, wait 较长时间,是为了保证 remove - * leader 之后新 leader 选举成功,切 become leader 的 flush config - * 完成 */ + /* It is possible to remove a leader. If a leader is being removed, it is necessary to wait until a new leader is generated, + * Otherwise, the add peer test below will fail and wait for a long time to ensure removal + * After the successful election of the new leader, switch to the flush configuration of the become leader + * Complete*/ ::usleep(1.5 * 1000 * electionTimeoutMs); butil::Status status = WaitLeader(logicPoolId, copysetId, @@ -219,7 +219,7 @@ TEST_F(CliTest, basic) { << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); } - /* 重复 add 同一个 peer */ + /*Repeatedly add the same peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0"); @@ -250,13 +250,13 @@ TEST_F(CliTest, basic) { LOG(INFO) << "transfer leader: " << st.error_code() << ", " << st.error_str(); ASSERT_TRUE(st.ok()); - /* transfer leader 只是讲 rpc 发送给leader,并不会等 leader transfer - * 成功才返回,所以这里需要等,除此之外,并不能立马去查 leader,因为 - * leader transfer 之后,可能返回之前的 leader,除此之外 transfer - * leader 成功了之后,become leader 进行时,leader 已经可查,但是 - * become leader 会执行 flush 当前 conf 来充当 noop,如果这个时候 - * 立马进行下一个 transfer leader,会被组织,因为同时只能有一个配置 - * 变更在进行 */ + /* The transfer leader only sends rpc to the leader and does not wait for the leader to transfer + * We only return after success, so we need to wait here. In addition, we cannot immediately check the leader because + * After the leader transfer, the previous leader may be returned, except for the transfer + * After the leader is successful, when the benefit leader is in progress, the leader is already visible, but + * The benefit leader will execute flush current conf to act as the noop. 
If at this time + * Immediately proceed to the next transfer leader, which will be organized because there can only be one configuration at the same time + * Changes in progress*/ ::usleep(waitTransferLeader); butil::Status status = WaitLeader(logicPoolId, copysetId, @@ -309,7 +309,7 @@ TEST_F(CliTest, basic) { ASSERT_TRUE(status.ok()); ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } - /* transfer 给 leader 给 leader,仍然返回成功 */ + /*Transfer to leader to leader, still returns success*/ { LOG(INFO) << "start transfer leader"; butil::Status st = curve::chunkserver::TransferLeader(logicPoolId, @@ -330,7 +330,7 @@ TEST_F(CliTest, basic) { ASSERT_STREQ(peer3.to_string().c_str(), leader.to_string().c_str()); } } - /* 异常分支测试 */ + /*Abnormal Branch Test*/ /* get leader - conf empty */ { Configuration conf; @@ -338,7 +338,7 @@ TEST_F(CliTest, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(EINVAL, status.error_code()); } - /* get leader - 非法的地址 */ + /*Get leader - illegal address*/ { Configuration conf; conf.parse_from("127.0.0.1:65540:0,127.0.0.1:65541:0,127.0.0.1:65542:0"); //NOLINT @@ -346,11 +346,11 @@ TEST_F(CliTest, basic) { ASSERT_FALSE(status.ok()); ASSERT_EQ(-1, status.error_code()); } - /* add peer - 不存在的 peer */ + /*Add peer - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9030:2"); - /* 添加一个根本不存在的节点 */ + /*Add a non-existent node*/ PeerId peerId("127.0.0.1:9039:2"); butil::Status status = curve::chunkserver::AddPeer(logicPoolId, copysetId, @@ -361,7 +361,7 @@ TEST_F(CliTest, basic) { LOG(INFO) << "add peer: " << status.error_code() << ", " << status.error_str(); } - /* transfer leader - 不存在的 peer */ + /*Transfer leader - non-existent peer*/ { Configuration conf; conf.parse_from("127.0.0.1:9030:0,127.0.0.1:9031:0,127.0.0.1:9032:0"); diff --git a/test/chunkserver/client.cpp b/test/chunkserver/client.cpp index 7f8c2e6243..619bd8c9ff 100644 --- a/test/chunkserver/client.cpp +++ b/test/chunkserver/client.cpp @@ -72,7 +72,7 @@ int main(int argc, char *argv[]) { - // 创建 copyset + // Create copyset { std::vector peers; conf.list_peers(&peers); diff --git a/test/chunkserver/clone/clone_copyer_test.cpp b/test/chunkserver/clone/clone_copyer_test.cpp index 3c15969d9a..b475730450 100644 --- a/test/chunkserver/clone/clone_copyer_test.cpp +++ b/test/chunkserver/clone/clone_copyer_test.cpp @@ -133,8 +133,8 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,读取成功 - * 预期:调用Open和Read读取数据 + /* Use case: Reading data on curve, successful reading + * Expected: Calling Open and Read to read data */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test", _, true)) @@ -151,8 +151,8 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:再次读前面的文件,但是ret值为-1 - * 预期:直接Read,返回失败 + /* Use case: Read the previous file again, but the ret value is -1 + * Expected: Direct Read, return failed */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) @@ -169,8 +169,8 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读curve上的数据,Open的时候失败 - * 预期:返回-1 + /* Use case: Reading data on curve, failed during Open + * Expected: Return -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) @@ -182,8 +182,8 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 
用例:读curve上的数据,Read的时候失败 - * 预期:返回-1 + /* Use case: Failed to read data on curve + * Expected: Return -1 */ context.location = "test2:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly("test2", _, true)) @@ -196,8 +196,8 @@ TEST_F(CloneCopyerTest, BasicTest) { closure.Reset(); - /* 用例:读s3上的数据,读取成功 - * 预期:返回0 + /* Use case: Reading data on s3, successful reading + * Expected: Return 0 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) @@ -211,8 +211,8 @@ TEST_F(CloneCopyerTest, BasicTest) { ASSERT_FALSE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 - * 预期:返回-1 + /* Use case: Read data on s3, read failed + * Expected: Return -1 */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) @@ -250,7 +250,7 @@ TEST_F(CloneCopyerTest, DisableTest) { options.curveUser.owner = ROOT_OWNER; options.curveUser.password = ROOT_PWD; options.curveFileTimeoutSec = EXPIRED_USE; - // 禁用curveclient和s3adapter + // Disable curveclient and s3adapter options.curveClient = nullptr; options.s3Client = nullptr; @@ -259,7 +259,7 @@ TEST_F(CloneCopyerTest, DisableTest) { .Times(0); ASSERT_EQ(0, copyer.Init(options)); - // 从上s3或者curve请求下载数据会返回失败 + // Requesting data download from s3 or curve above will return a failure { char* buf = new char[4096]; AsyncDownloadContext context; @@ -268,7 +268,7 @@ TEST_F(CloneCopyerTest, DisableTest) { context.buf = buf; MockDownloadClosure closure(&context); - /* 用例:读curve上的数据,读取失败 + /* Use case: Read data on curve, read failed */ context.location = "test:0@cs"; EXPECT_CALL(*curveClient_, Open4ReadOnly(_, _, true)) @@ -280,7 +280,7 @@ TEST_F(CloneCopyerTest, DisableTest) { ASSERT_TRUE(closure.IsFailed()); closure.Reset(); - /* 用例:读s3上的数据,读取失败 + /* Use case: Read data on s3, read failed */ context.location = "test@s3"; EXPECT_CALL(*s3Client_, GetObjectAsync(_)) @@ -291,7 +291,7 @@ TEST_F(CloneCopyerTest, DisableTest) { closure.Reset(); delete [] buf; } - // fini 可以成功 + // Fini can succeed ASSERT_EQ(0, copyer.Fini()); } diff --git a/test/chunkserver/clone/clone_core_test.cpp b/test/chunkserver/clone/clone_core_test.cpp index 86d6a70898..7d054a69b1 100644 --- a/test/chunkserver/clone/clone_core_test.cpp +++ b/test/chunkserver/clone/clone_core_test.cpp @@ -139,8 +139,8 @@ class CloneCoreTest }; /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不是clone chunk - * result:不会从远端拷贝数据,直接从本地读取数据,结果返回成功 + * Test CHUNK_OP_READ type request, requesting to read a chunk that is not a clone chunk + * Result: Will not copy data from the remote end, directly read data from the local, and the result is returned as successful */ TEST_P(CloneCoreTest, ReadChunkTest1) { off_t offset = 0; @@ -149,9 +149,9 @@ TEST_P(CloneCoreTest, ReadChunkTest1) { = std::make_shared(SLICE_SIZE, true, copyer_); std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); - // 不会从源端拷贝数据 + // Will not copy data from the source EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -159,11 +159,11 @@ TEST_P(CloneCoreTest, ReadChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(1); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest 
will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); @@ -176,15 +176,15 @@ } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk是clone chunk - * case1:请求读取的区域全部被写过 - * result1:全部从本地chunk读取 - * case2:请求读取的区域都未被写过 - * result2:全部从源端读取,产生paste请求 - * case3:请求读取的区域有部分被写过,部分未被写过 - * result3:写过区域从本地chunk读取,未写过区域从源端读取,产生paste请求 - * case4:请求读取的区域部分被写过,请求的偏移未与pagesize对齐 - * result4:返回错误 + * Test a CHUNK_OP_READ request where the chunk to read is a clone chunk + * Case 1: the whole requested region has already been written + * Result 1: everything is read from the local chunk + * Case 2: none of the requested region has been written + * Result 2: everything is read from the source and a paste request is generated + * Case 3: part of the requested region has been written and part has not + * Result 3: written regions are read from the local chunk, unwritten regions are read from the source, and a paste request is generated + * Case 4: part of the requested region has been written, and the requested offset is not aligned to the page size + * Result 4: an error is returned */ TEST_P(CloneCoreTest, ReadChunkTest2) { off_t offset = 0; @@ -200,22 +200,22 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // Released by the closure after each HandleReadRequest call std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillOnce(DoAll(SetArrayArgument<2>(chunkData, chunkData + length), Return(CSErrorCode::Success))); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); @@ -237,7 +237,7 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // Released by the closure after each HandleReadRequest call std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); char *cloneData = new char[length]; @@ -252,11 +252,11 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // A PasteChunkRequest is generated braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -272,8 +272,8 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal propose, the closure is handed over to the concurrency layer for processing; + // since the node here is a mock, task.done.Run must be executed manually to release the resources ASSERT_NE(nullptr,
task.done); task.done->Run(); ASSERT_EQ( @@ -289,7 +289,7 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { { info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); char *cloneData = new char[length]; @@ -304,7 +304,7 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files char chunkData[pagesize_ + 2 * blocksize_]; // NOLINT(runtime/arrays) memset(chunkData, 'a', pagesize_ + 2 * blocksize_); EXPECT_CALL(*datastore_, @@ -313,9 +313,9 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { DoAll(SetArrayArgument<2>( chunkData, chunkData + pagesize_ + 2 * blocksize_), Return(CSErrorCode::Success))); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -331,15 +331,15 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the concurrency layer for processing, + // Since the node here is mock, it is necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ(memcmp(chunkData, - closure->resContent_.attachment.to_string().c_str(), //NOLINT + closure->resContent_.attachment.to_string().c_str(), // NOLINT 3 * blocksize_), 0); ASSERT_EQ(memcmp(cloneData, - closure->resContent_.attachment.to_string().c_str() + 3 * blocksize_, //NOLINT + closure->resContent_.attachment.to_string().c_str() + 3 * blocksize_, // NOLINT 2 * blocksize_), 0); } // case4 @@ -349,7 +349,7 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { length = 4 * blocksize_; info.bitmap->Clear(); info.bitmap->Set(0, 2); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); @@ -357,11 +357,11 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(0); - // 不产生PasteChunkRequest + // Do not generate PasteChunkRequest braft::Task task; EXPECT_CALL(*node_, Propose(_)).Times(0); @@ -377,8 +377,8 @@ TEST_P(CloneCoreTest, ReadChunkTest2) { } /** - * 测试CHUNK_OP_READ类型请求,请求读取的chunk不存在,但是请求中包含源端数据地址 - * 预期结果:从源端下载数据,产生paste请求 + * Test CHUNK_OP_READ type request, the chunk requested for reading does not exist, but the request contains the source data address + * Expected result: Download data from the source and generate a pass request */ TEST_P(CloneCoreTest, ReadChunkTest3) { off_t offset = 0; @@ -395,7 +395,7 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the closure std::shared_ptr readRequest = 
GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); SetCloneParam(readRequest); @@ -410,11 +410,11 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .Times(2) .WillRepeatedly(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -430,8 +430,8 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the concurrency layer for processing, + // Since the node here is mock, it is necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_EQ( @@ -445,13 +445,13 @@ TEST_P(CloneCoreTest, ReadChunkTest3) { } /** - * 执行HandleReadRequest过程中出现错误 - * case1:GetChunkInfo时出错 - * result1:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case2:Download时出错 - * result2:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN - * case3:ReadChunk时出错 - * result3:返回-1,response状态改为CHUNK_OP_STATUS_FAILURE_UNKNOWN + * An error occurred during the execution of HandleReadRequest + * Case1: Error in GetChunkInfo + * Result1: Returns -1, and the response status changes to CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case2: Error downloading + * Result2: Returns -1, and the response status changes to CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Case3: Error in ReadChunk + * Result3: Returns -1, and the response status changes to CHUNK_OP_STATUS_FAILURE_UNKNOWN */ TEST_P(CloneCoreTest, ReadChunkErrorTest) { off_t offset = 0; @@ -532,7 +532,7 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { })); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -548,8 +548,8 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { closure->resContent_.status); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the concurrency layer for processing, + // Since the node here is mock, it is necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); delete[] cloneData; @@ -557,8 +557,8 @@ TEST_P(CloneCoreTest, ReadChunkErrorTest) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk不是clone chunk - * result:不会从远端拷贝数据,也不会从本地读取数据,直接返回成功 + * Test CHUNK_OP_RECOVER type request, the requested chunk is not a clone chunk + *Result: Will not copy data remotely or read data locally, returns success directly */ TEST_P(CloneCoreTest, RecoverChunkTest1) { off_t offset = 0; @@ -567,9 +567,9 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { = std::make_shared(SLICE_SIZE, true, copyer_); std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); - // 不会从源端拷贝数据 + // Will not copy data from the sourc EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); - // 获取chunk信息 + // Obtain chunk information CSChunkInfo info; info.isClone = false; info.metaPageSize = pagesize_; @@ -577,9 +577,9 @@ 
TEST_P(CloneCoreTest, RecoverChunkTest1) { info.blockSize = blocksize_; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); @@ -592,11 +592,11 @@ TEST_P(CloneCoreTest, RecoverChunkTest1) { } /** - * 测试CHUNK_OP_RECOVER类型请求,请求的chunk是clone chunk - * case1:请求恢复的区域全部被写过 - * result1:不会拷贝数据,直接返回成功 - * case2:请求恢复的区域全部或部分未被写过 - * result2:从远端拷贝数据,并产生paste请求 + * Test a CHUNK_OP_RECOVER request where the requested chunk is a clone chunk + * Case 1: the whole region requested for recovery has already been written + * Result 1: no data is copied, success is returned directly + * Case 2: the region requested for recovery is wholly or partially unwritten + * Result 2: data is copied from the remote end and a paste request is generated */ TEST_P(CloneCoreTest, RecoverChunkTest2) { off_t offset = 0; @@ -612,16 +612,16 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { // case1 { info.bitmap->Set(); - // 每次调HandleReadRequest后会被closure释放 + // Released by the closure after each HandleReadRequest call std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT EXPECT_CALL(*copyer_, DownloadAsync(_)).Times(0); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // The chunk file will not be read EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)).Times(0); - // 不会产生PasteChunkRequest + // No PasteChunkRequest will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, core->HandleReadRequest(readRequest, readRequest->Closure())); @@ -636,7 +636,7 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // Released by the closure after each HandleReadRequest call std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT char *cloneData = new char[length]; @@ -650,9 +650,9 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // The chunk file will not be read EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // A PasteChunkRequest is generated braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -662,12 +662,12 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { core->HandleReadRequest(readRequest, readRequest->Closure())); FakeChunkClosure *closure = reinterpret_cast(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + // The closure is handed over to the PasteRequest for processing; at this point it has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal propose, the closure is handed over to the concurrency layer for processing; + // since the node here is a mock, task.done.Run must be executed manually to release the resources ASSERT_NE(nullptr, task.done); task.done->Run(); @@ -678,8 +678,8 @@ TEST_P(CloneCoreTest, RecoverChunkTest2) { } } -// case1: read
chunk时,从远端拷贝数据,但是不会产生paste请求 -// case2: recover chunk时,从远端拷贝数据,会产生paste请求 +// Case1: When reading a chunk, data is copied from the remote end, but no paste request is generated +// Case2: When recovering a chunk, data is copied from the remote end and a paste request is generated TEST_P(CloneCoreTest, DisablePasteTest) { off_t offset = 0; size_t length = 5 * blocksize_; @@ -695,7 +695,7 @@ TEST_P(CloneCoreTest, DisablePasteTest) { // case1 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the closure std::shared_ptr readRequest = GenerateReadRequest(CHUNK_OP_TYPE::CHUNK_OP_READ, offset, length); char *cloneData = new char[length]; @@ -710,12 +710,12 @@ TEST_P(CloneCoreTest, DisablePasteTest) { .Times(2) .WillRepeatedly( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 更新 applied index + // Update applied index EXPECT_CALL(*node_, UpdateAppliedIndex(_)).Times(1); - // 不会产生paste chunk请求 + // No paste chunk request will be generated EXPECT_CALL(*node_, Propose(_)).Times(0); ASSERT_EQ(0, @@ -732,7 +732,7 @@ TEST_P(CloneCoreTest, DisablePasteTest) { // case2 { info.bitmap->Clear(); - // 每次调HandleReadRequest后会被closure释放 + // After each call to HandleReadRequest, it will be released by the closure std::shared_ptr readRequest = GenerateReadRequest( CHUNK_OP_TYPE::CHUNK_OP_RECOVER, offset, length); // NOLINT char *cloneData = new char[length]; @@ -746,9 +746,9 @@ TEST_P(CloneCoreTest, DisablePasteTest) { EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不会读chunk文件 + // Will not read the chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)).Times(0); - // 产生PasteChunkRequest + // Generate PasteChunkRequest braft::Task task; butil::IOBuf iobuf; task.data = &iobuf; @@ -758,12 +758,12 @@ TEST_P(CloneCoreTest, DisablePasteTest) { core->HandleReadRequest(readRequest, readRequest->Closure())); FakeChunkClosure *closure = reinterpret_cast(readRequest->Closure()); - // closure被转交给PasteRequest处理,这里closure还未执行 + // The closure has been forwarded to PasteRequest for processing, and the closure has not been executed yet ASSERT_FALSE(closure->isDone_); CheckTask(task, offset, length, cloneData); - // 正常propose后,会将closure交给并发层处理, - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // After a normal proposal, the closure will be handed over to the concurrency layer for processing, + // Since the node here is mock, it is necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); diff --git a/test/chunkserver/clone/clone_manager_test.cpp b/test/chunkserver/clone/clone_manager_test.cpp index f41bc1bed2..e85115d38a 100644 --- a/test/chunkserver/clone/clone_manager_test.cpp +++ b/test/chunkserver/clone/clone_manager_test.cpp @@ -58,32 +58,32 @@ TEST_F(CloneManagerTest, BasicTest) { CloneOptions options; options.checkPeriod = 100; CloneManager cloneMgr; - // 如果线程数设置为0,启动线程池失败 + //If the number of threads is set to 0, starting the thread pool fails { options.threadNum = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 队列深度为0,启动线程池会失败 + //Queue depth is 0, starting thread pool will fail { options.threadNum = 5; options.queueCapacity = 0; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), -1); } - // 线程数和队列深度都大于0,可以启动线程池 +//If the number of 
threads and queue depth are both greater than 0, the thread pool can be started { options.threadNum = 5; options.queueCapacity = 100; ASSERT_EQ(cloneMgr.Init(options), 0); ASSERT_EQ(cloneMgr.Run(), 0); - // 线程池启动运行后,重复Run直接返回成功 + //After the thread pool starts running, repeating the run directly returns success ASSERT_EQ(cloneMgr.Run(), 0); } - // 通过Fini暂停任务 + //Pause tasks through Fini { ASSERT_EQ(cloneMgr.Fini(), 0); - // 重复Fini直接返回成功 + //Repeated Fini directly returns success ASSERT_EQ(cloneMgr.Fini(), 0); } } @@ -99,9 +99,9 @@ TEST_F(CloneManagerTest, TaskTest) { std::shared_ptr req = std::make_shared(); - // 测试 GenerateCloneTask 和 IssueCloneTask + //Testing GenerateCloneTask and IssueCloneTask { - // options.core为nullptr,则产生的任务也是nullptr + //If options.core is nullptr, the resulting task is also nullptr std::shared_ptr task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_EQ(task, nullptr); @@ -111,53 +111,53 @@ TEST_F(CloneManagerTest, TaskTest) { task = cloneMgr.GenerateCloneTask(req, nullptr); ASSERT_NE(task, nullptr); - // 自定义任务测试 + //Custom task testing task = std::make_shared(); ASSERT_FALSE(task->IsComplete()); - // 如果clone manager还未启动,则无法发布任务 + //If the clone manager has not yet started, the task cannot be published ASSERT_FALSE(cloneMgr.IssueCloneTask(task)); - // 启动以后就可以发布任务 + //After startup, tasks can be published ASSERT_EQ(cloneMgr.Run(), 0); ASSERT_TRUE(cloneMgr.IssueCloneTask(task)); - // 等待一点时间,任务执行完成,检查任务状态以及是否从队列中移除 + //Wait for a moment, the task execution is completed, check the task status and whether it has been removed from the queue std::this_thread::sleep_for( std::chrono::milliseconds(200)); ASSERT_TRUE(task->IsComplete()); - // 无法发布空任务 + //Unable to publish empty task ASSERT_FALSE(cloneMgr.IssueCloneTask(nullptr)); } - // 测试自定义的测试任务 + //Test custom test tasks { - // 初始化执行时间各不相同的任务 + //Initialize tasks with varying execution times std::shared_ptr task1 = std::make_shared(); std::shared_ptr task2 = std::make_shared(); std::shared_ptr task3 = std::make_shared(); task1->SetSleepTime(100); task2->SetSleepTime(300); task3->SetSleepTime(500); - // 同时发布所有任务 + //Publish all tasks simultaneously ASSERT_TRUE(cloneMgr.IssueCloneTask(task1)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task2)); ASSERT_TRUE(cloneMgr.IssueCloneTask(task3)); - // 此时任务还在执行中,此时引用计数为2 + //At this point, the task is still executing and the reference count is 2 ASSERT_FALSE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 等待220ms,task1执行成功,其他还没完成;220ms基本可以保证task1执行完 + //Waiting for 220ms, task1 successfully executed, but other tasks have not been completed yet; 220ms basically guarantees the completion of task1 execution std::this_thread::sleep_for( std::chrono::milliseconds(220)); ASSERT_TRUE(task1->IsComplete()); ASSERT_FALSE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,task2执行成功,task3还在执行中 + //Wait another 200ms, task2 successfully executed, and task3 is still executing std::this_thread::sleep_for( std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); ASSERT_TRUE(task2->IsComplete()); ASSERT_FALSE(task3->IsComplete()); - // 再等待200ms,所有任务执行成功,任务全被移出队列 + //Wait for another 200ms, all tasks are successfully executed, and all tasks are moved out of the queue std::this_thread::sleep_for( std::chrono::milliseconds(200)); ASSERT_TRUE(task1->IsComplete()); diff --git a/test/chunkserver/clone/op_request_test.cpp b/test/chunkserver/clone/op_request_test.cpp index 6746594097..c44d584edd 100644 --- 
a/test/chunkserver/clone/op_request_test.cpp +++ b/test/chunkserver/clone/op_request_test.cpp @@ -103,7 +103,7 @@ class OpRequestTest }; TEST_P(OpRequestTest, CreateCloneTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -131,7 +131,7 @@ TEST_P(OpRequestTest, CreateCloneTest) { response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -152,12 +152,12 @@ TEST_P(OpRequestTest, CreateCloneTest) { ASSERT_EQ(sn, request->sn()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 + // Set expectations EXPECT_CALL(*node_, IsLeaderTerm()) .WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); @@ -168,7 +168,7 @@ TEST_P(OpRequestTest, CreateCloneTest) { opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -176,15 +176,15 @@ TEST_P(OpRequestTest, CreateCloneTest) { // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*node_, IsLeaderTerm()) .WillRepeatedly(Return(true)); braft::Task task; @@ -193,11 +193,11 @@ TEST_P(OpRequestTest, CreateCloneTest) { opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is mock, it is necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); @@ -264,15 +264,15 @@ TEST_P(OpRequestTest, CreateCloneTest) { closure->response_->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); @@ -280,15 +280,15 @@ TEST_P(OpRequestTest, CreateCloneTest) { opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); @@ -296,27 +296,27 @@ TEST_P(OpRequestTest, CreateCloneTest) { ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // 
Set expectations EXPECT_CALL(*datastore_, CreateCloneChunk(_, _, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, PasteChunkTest) { - // 生成临时的readrequest + // Generate temporary readrequest ChunkResponse *response = new ChunkResponse(); std::shared_ptr readChunkRequest = std::make_shared(node_, @@ -326,7 +326,7 @@ TEST_P(OpRequestTest, PasteChunkTest) { response, nullptr); - // 创建PasteChunkRequest + // Create PasteChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -353,7 +353,7 @@ TEST_P(OpRequestTest, PasteChunkTest) { &cloneData, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -376,12 +376,12 @@ TEST_P(OpRequestTest, PasteChunkTest) { ASSERT_STREQ(str.c_str(), data.to_string().c_str()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 + // Set expectations EXPECT_CALL(*node_, IsLeaderTerm()) .WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); @@ -392,7 +392,7 @@ TEST_P(OpRequestTest, PasteChunkTest) { opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -400,15 +400,15 @@ TEST_P(OpRequestTest, PasteChunkTest) { // ASSERT_STREQ(closure->response_->redirect().c_str(), PEER_STRING); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true - * 预期: 会调用Propose,且不会调用closure + * Test Process + * Scenario: node_->IsLeaderTerm() == true + * Expected: Propose will be called and closure will not be called */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*node_, IsLeaderTerm()) .WillRepeatedly(Return(true)); braft::Task task; @@ -417,31 +417,31 @@ TEST_P(OpRequestTest, PasteChunkTest) { opReq->Process(); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(response->has_status()); - // 由于这里node是mock的,因此需要主动来执行task.done.Run来释放资源 + // Since the node here is mock, it is necessary to proactively execute task.done.Run to release resources ASSERT_NE(nullptr, task.done); task.done->Run(); ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:CreateCloneChunk成功 - * 预期:返回 CHUNK_OP_STATUS_SUCCESS ,并更新apply index + * Test OnApply + * Scenario: CreateCloneChunk successful + * Expected: return CHUNK_OP_STATUS_SUCCESS and update the apply index */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -449,36 +449,36 @@ TEST_P(OpRequestTest, PasteChunkTest) { response->status()); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Test OnApply + * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) 
.WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Test OnApply + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -486,15 +486,15 @@ TEST_P(OpRequestTest, PasteChunkTest) { response->status()); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk成功 - * 预期:无返回 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk successful + * Expected: No return */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillOnce(Return(CSErrorCode::Success)); @@ -502,15 +502,15 @@ TEST_P(OpRequestTest, PasteChunkTest) { opReq->OnApplyFromLog(datastore_, *request, data); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回InternalError - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed, returning InternalError + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InternalError)); @@ -518,27 +518,27 @@ TEST_P(OpRequestTest, PasteChunkTest) { ASSERT_DEATH(opReq->OnApplyFromLog(datastore_, *request, data), ""); } /** - * 测试 OnApplyFromLog - * 用例:CreateCloneChunk失败,返回其他错误 - * 预期:进程退出 + * Testing OnApplyFromLog + * Scenario: CreateCloneChunk failed with other errors returned + * Expected: Process Exit */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, PasteChunk(_, _, _, _)) .WillRepeatedly(Return(CSErrorCode::InvalidArgError)); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, ReadChunkTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -565,7 +565,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -585,9 +585,9 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return CHUNK_OP_STATUS_REDIRECTED */ { // set expection @@ -724,16 +724,16 @@ TEST_P(OpRequestTest, ReadChunkTest) { info.bitmap = std::make_shared(chunksize_ / blocksize_); /** - * 测试Process - * 用例: node_->IsLeaderTerm() == true, - * 请求的 apply index 小于等于 node的 apply index - * 预期: 不会走一致性协议,请求提交给concurrentApplyModule_处理 + * Test Process + * Scenario: node_->IsLeaderTerm() == true, + * The requested application index is less than or equal to the node's application index + * Expected: Will not follow the consistency protocol, request submission to ConcurrentApplyModule_ handle */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*node_, 
IsLeaderTerm()) .WillRepeatedly(Return(true)); @@ -775,20 +775,20 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: Chunk read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) @@ -797,7 +797,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -811,21 +811,21 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmaps in the request area are all 1 + * Expected: Chunk read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce( DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) @@ -834,7 +834,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -850,21 +850,21 @@ TEST_P(OpRequestTest, ReadChunkTest) { } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the request area has a bit of 0 + * Expected: Forward request to clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .Times(0); EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) @@ -874,7 +874,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -883,44 +883,44 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * Expected: Request failed, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 不会读chunk文件 + // Unable to read chunk 
files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response->status()); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 但是请求中包含源chunk的信息 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * But the request contains information about the source chunk + * Expected: Forward request to clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .Times(0); EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) @@ -930,7 +930,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -939,24 +939,24 @@ TEST_P(OpRequestTest, ReadChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 请求中包含源chunk的信息 - * 预期:从本地读chunk,返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmaps in the request area are all 1 + * The request contains information about the source chunk + * Expected: Chunk read locally, returning CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); request->set_clonefilesource("/test"); request->set_clonefileoffset(0); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files char *chunkData = new char[length]; memset(chunkData, 'a', length); EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) @@ -966,7 +966,7 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -978,66 +978,66 @@ TEST_P(OpRequestTest, ReadChunkTest) { delete[] chunkData; } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 非ChunkNotExistError错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: GetChunkInfo returns a non ChunkNotExistError error + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 不会读chunk文件 + // Unable to read chunk files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试OnApply - * 用例:读本地chunk的时候返回错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: Error returned when reading local chunk + * Expected: Request failed, returning 
CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件失败 + //Failed to read chunk file EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .WillRepeatedly(Return(CSErrorCode::InternalError)); ASSERT_DEATH(opReq->OnApply(3, closure), ""); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 转发请求给clone manager时出错 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the request area has a bit of 0 + * Error forwarding request to clone manager + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .Times(0); EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) @@ -1047,29 +1047,29 @@ TEST_P(OpRequestTest, ReadChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Testing OnApplyFromLog + * Expected: Nothing done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } TEST_P(OpRequestTest, RecoverChunkTest) { - // 创建CreateCloneChunkRequest + // Create CreateCloneChunkRequest LogicPoolID logicPoolId = 1; CopysetID copysetId = 10001; uint64_t chunkId = 12345; @@ -1096,7 +1096,7 @@ TEST_P(OpRequestTest, RecoverChunkTest) { response, closure); /** - * 测试Encode/Decode + * Test Encode/Decode */ { butil::IOBuf log; @@ -1116,12 +1116,12 @@ TEST_P(OpRequestTest, RecoverChunkTest) { ASSERT_EQ(length, request->size()); } /** - * 测试Process - * 用例: node_->IsLeaderTerm() == false - * 预期: 会要求转发请求,返回CHUNK_OP_STATUS_REDIRECTED + * Test Process + * Scenario: node_->IsLeaderTerm() == false + * Expected: Request to forward request and return CHUNK_OP_STATUS_REDIRECTED */ { - // 设置预期 + // Set expectations EXPECT_CALL(*node_, IsLeaderTerm()) .WillRepeatedly(Return(false)); // PeerId leaderId(PEER_STRING); @@ -1132,7 +1132,7 @@ TEST_P(OpRequestTest, RecoverChunkTest) { opReq->Process(); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, @@ -1154,18 +1154,18 @@ TEST_P(OpRequestTest, RecoverChunkTest) { * expect: don't propose to raft,request commit to concurrentApplyModule_ */ { - // 重置closure + // Reset closure closure->Reset(); info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不读chunk文件 + // Do not read chunk files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .Times(0); - // 设置预期 + // Set expectations EXPECT_CALL(*node_, IsLeaderTerm()) .WillRepeatedly(Return(true)); EXPECT_CALL(*node_, Propose(_)) @@ -1193,26 +1193,26 @@ TEST_P(OpRequestTest, RecoverChunkTest) { } /** - * 
测试OnApply - * 用例:请求的 chunk 不是 clone chunk - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is not a clone chunk + * Expected: Directly return to CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = false; EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不读chunk文件 + // Do not read chunk files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -1220,27 +1220,27 @@ TEST_P(OpRequestTest, RecoverChunkTest) { response->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap都为1 - * 预期:直接返回 CHUNK_OP_STATUS_SUCCESS + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmaps in the request area are all 1 + * Expected: Directly return to CHUNK_OP_STATUS_SUCCESS */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Set(); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 不读chunk文件 + // Do not read chunk files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, closure->response_->appliedindex()); ASSERT_TRUE(response->has_status()); @@ -1248,21 +1248,21 @@ TEST_P(OpRequestTest, RecoverChunkTest) { closure->response_->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 预期:将请求转发给clone manager处理 + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the request area has a bit of 0 + * Expected: Forward request to clone manager for processing */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .Times(0); EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) @@ -1272,7 +1272,7 @@ TEST_P(OpRequestTest, RecoverChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_FALSE(closure->isDone_); ASSERT_FALSE(response->has_appliedindex()); ASSERT_FALSE(closure->response_->has_status()); @@ -1281,70 +1281,70 @@ TEST_P(OpRequestTest, RecoverChunkTest) { ASSERT_TRUE(closure->isDone_); } /** - * 测试OnApply - * 用例:GetChunkInfo 返回 ChunkNotExistError - * 预期:请求失败,返回 CHUNK_OP_STATUS_CHUNK_NOTEXIST + * Test OnApply + * Scenario: GetChunkInfo returns ChunkNotExistError + * Expected: Request failed, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::ChunkNotExistError)); - // 不会读chunk文件 + // Unable to read chunk files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST, response->status()); } /** 
- * 测试OnApply - * 用例:GetChunkInfo 返回 非ChunkNotExistError错误 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: GetChunkInfo returns a non ChunkNotExistError error + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(Return(CSErrorCode::InternalError)); - // 不会读chunk文件 + // Unable to read chunk files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, offset, length)) .Times(0); opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试OnApply - * 用例:请求的chunk是 clone chunk,请求区域的bitmap存在bit为0 - * 转发请求给clone manager时出错 - * 预期:请求失败,返回 CHUNK_OP_STATUS_FAILURE_UNKNOWN + * Test OnApply + * Scenario: The requested chunk is a clone chunk, and the bitmap in the request area has a bit of 0 + * Error forwarding request to clone manager + * Expected: Request failed, returning CHUNK_OP_STATUS_FAILURE_UNKNOWN */ { - // 重置closure + // Reset closure closure->Reset(); - // 设置预期 + // Set expectations info.isClone = true; info.bitmap->Clear(1); EXPECT_CALL(*datastore_, GetChunkInfo(_, _)) .WillOnce(DoAll(SetArgPointee<1>(info), Return(CSErrorCode::Success))); - // 读chunk文件 + // Reading Chunk Files EXPECT_CALL(*datastore_, ReadChunk(_, _, _, _, _)) .Times(0); EXPECT_CALL(*cloneMgr_, GenerateCloneTask(_, _)) @@ -1354,24 +1354,24 @@ TEST_P(OpRequestTest, RecoverChunkTest) { opReq->OnApply(3, closure); - // 验证结果 + // Verification results ASSERT_TRUE(closure->isDone_); ASSERT_EQ(LAST_INDEX, response->appliedindex()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, response->status()); } /** - * 测试 OnApplyFromLog - * 预期:啥也没做 + * Testing OnApplyFromLog + * Expected: Nothing done */ { - // 重置closure + // Reset closure closure->Reset(); butil::IOBuf data; opReq->OnApplyFromLog(datastore_, *request, data); } - // 释放资源 + // Release resources closure->Release(); } diff --git a/test/chunkserver/copyset_epoch_test.cpp b/test/chunkserver/copyset_epoch_test.cpp index f9f80ad50f..41dfe541f0 100644 --- a/test/chunkserver/copyset_epoch_test.cpp +++ b/test/chunkserver/copyset_epoch_test.cpp @@ -81,7 +81,7 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { uint64_t lastIncludeIndex = 0; /** - * 启动一个chunkserver + * Start a chunkserver */ std::string copysetdir = "local://./" + dir1; auto startChunkServerFunc = [&] { @@ -125,10 +125,10 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { confEpochPath1.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath1)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be executed to load the epoch from the snapshot node->Fini(); node->Run(); { @@ -149,10 +149,10 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { confEpochPath2.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath2)); - // 等待生成快照 + // Waiting for snapshot generation ::sleep(2 * snapshotInterval); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be executed to load the epoch from the snapshot node->Fini(); node->Run(); { @@ -173,7 +173,7 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { 
confEpochPath3.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath3)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be executed to load the epoch from the snapshot node->Fini(); node->Run(); { @@ -194,7 +194,7 @@ TEST_F(CopysetEpochTest, DISABLED_basic) { confEpochPath4.append(kCurveConfEpochFilename); ASSERT_EQ(true, fs->FileExists(confEpochPath4)); - // node关闭重启,会执行load snapshot,从snapshot中加载epoch + // When the node is shut down and restarted, a load snapshot will be executed to load the epoch from the snapshot node->Fini(); node->Run(); { diff --git a/test/chunkserver/copyset_node_manager_test.cpp b/test/chunkserver/copyset_node_manager_test.cpp index 7103ba0697..754d2f52b7 100644 --- a/test/chunkserver/copyset_node_manager_test.cpp +++ b/test/chunkserver/copyset_node_manager_test.cpp @@ -143,7 +143,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); - // 本地 copyset 未加载完成,则无法创建新的copyset + // Cannot create a new copyset if the local copyset has not been loaded completely ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); @@ -152,7 +152,7 @@ TEST_F(CopysetNodeManagerTest, NormalTest) { copysetId, conf)); ASSERT_TRUE(copysetNodeManager->IsExist(logicPoolId, copysetId)); - // 重复创建 + // Duplicate creation ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId, conf)); @@ -185,20 +185,20 @@ TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { std::shared_ptr mockNode = std::make_shared(); - // 测试copyset node manager还没运行 + // The test copyset node manager has not been run yet EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 启动copyset node manager + // Start the copyset node manager ASSERT_EQ(0, copysetNodeManager->Run()); - // 测试node为空 + // Test node is empty EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); EXPECT_CALL(*mockNode, GetLeaderStatus(_)).Times(0); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(nullptr)); - // 测试无法获取到leader status的情况 + // Test the situation where the leader status cannot be obtained EXPECT_CALL(*mockNode, GetStatus(_)).Times(0); NodeStatus leaderStatus; EXPECT_CALL(*mockNode, GetLeaderStatus(_)) @@ -207,7 +207,7 @@ TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); leaderStatus.leader_id.parse("127.0.0.1:9043:0"); - // 测试leader first_index 大于 follower last_index的情况 + // Test the situation that leader first_index greater than follower last_index leaderStatus.first_index = 1000; NodeStatus followerStatus; followerStatus.last_index = 999; @@ -217,7 +217,7 @@ TEST_F(CopysetNodeManagerTest, CheckCopysetTest) { .WillOnce(DoAll(SetArgPointee<0>(leaderStatus), Return(true))); ASSERT_FALSE(copysetNodeManager->CheckCopysetUntilLoadFinished(mockNode)); - // 测试可以获取到leader status,且follower当前不在安装快照 的情况 + // The test can obtain the leader status and the follower is currently not installing the snapshot leaderStatus.first_index = 1; leaderStatus.committed_index = 2000; NodeStatus status1; @@ -258,10 +258,10 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { LOG(FATAL) << "Fail to start Server"; } - // 构造初始环境 + // Construct initial environment ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); ASSERT_EQ(0, copysetNodeManager->Run()); - // 创建多个copyset + // Create 
multiple copyset int copysetNum = 5; for (int i = 0; i < copysetNum; ++i) { ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolId, @@ -277,7 +277,7 @@ TEST_F(CopysetNodeManagerTest, ReloadTest) { ASSERT_EQ(0, copysetNodes.size()); - // 本地 copyset 未加载完成,则无法创建新的copyset + // Cannot create a new copyset if the local copyset has not been loaded completely ASSERT_FALSE(copysetNodeManager->CreateCopysetNode(logicPoolId, copysetId + 5, conf)); diff --git a/test/chunkserver/copyset_node_test.cpp b/test/chunkserver/copyset_node_test.cpp index 46ed6a4fdb..9190029049 100644 --- a/test/chunkserver/copyset_node_test.cpp +++ b/test/chunkserver/copyset_node_test.cpp @@ -67,7 +67,7 @@ const int port = 9044; class FakeSnapshotReader : public braft::SnapshotReader { public: std::string get_path() { - /* 返回一个不存在的 path */ + /*Returns a non-existent path*/ return std::string("/1002093939/temp/238408034"); } void list_files(std::vector *files) { @@ -84,7 +84,7 @@ class FakeSnapshotReader : public braft::SnapshotReader { class FakeSnapshotWriter : public braft::SnapshotWriter { public: std::string get_path() { - /* 返回一个不存在的 path */ + /*Returns a non-existent path*/ return std::string("."); } void list_files(std::vector *files) { @@ -545,7 +545,7 @@ TEST_F(CopysetNodeTest, error_test) { copysetNode.Fini(); ::system(rmCmd.c_str()); } - /* load: logic pool id 错误 */ + /* Load: logic pool id error */ { LogicPoolID logicPoolID = 123; CopysetID copysetID = 1345; @@ -565,7 +565,7 @@ TEST_F(CopysetNodeTest, error_test) { copysetNode.Fini(); ::system(rmCmd.c_str()); } - /* load: copyset id 错误 */ + /* Load: copyset id error */ { LogicPoolID logicPoolID = 123; CopysetID copysetID = 1345; @@ -607,7 +607,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { conf1.add_peer(peer1); conf2.add_peer(peer1); - // 当前没有在做配置变更 + // There are currently no configuration changes in progress { CopysetNode copysetNode(logicPoolID, copysetID, conf); std::shared_ptr mockNode @@ -628,7 +628,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(0, copysetNode.GetConfChange(&type, &oldConf, &alterPeer)); EXPECT_EQ(ConfigChangeType::NONE, type); } - // 当前正在Add Peer + // Currently adding Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); std::shared_ptr mockNode @@ -666,7 +666,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::ADD_PEER, type); EXPECT_EQ(addPeer.address(), alterPeer.address()); } - // 当前正在Remove Peer + // Currently removing Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); std::shared_ptr mockNode @@ -704,7 +704,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::REMOVE_PEER, type); EXPECT_EQ(removePeer.address(), alterPeer.address()); } - // 当前正在Transfer leader + // Currently transferring leader { CopysetNode copysetNode(logicPoolID, copysetID, conf); std::shared_ptr mockNode @@ -742,7 +742,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::TRANSFER_LEADER, type); EXPECT_EQ(transferee1.address(), alterPeer.address()); } - // 当前正在Change Peer + // Currently changing Peer { CopysetNode copysetNode(logicPoolID, copysetID, conf); std::shared_ptr mockNode @@ -778,7 +778,7 @@ TEST_F(CopysetNodeTest, get_conf_change) { EXPECT_EQ(ConfigChangeType::CHANGE_PEER, type); EXPECT_EQ(addPeer1.address(), alterPeer.address()); } - // leader term小于0 + // leader term is less than 0 { CopysetNode copysetNode(logicPoolID, copysetID, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); @@ -816,7 +816,7 @@ TEST_F(CopysetNodeTest, get_hash) { 
ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - // 生成多个有数据的文件 + // Generate multiple files with data ::system("echo \"abcddddddddd333\" >" "copyset_node_test/8589934594/data/test-2.txt"); ::system("echo \"mmmmmmmm\" >" @@ -830,7 +830,7 @@ TEST_F(CopysetNodeTest, get_hash) { ::system("echo \"wwwww\" > " "copyset_node_test/8589934594/data/test-1.txt"); - // 获取hash + // Get hash ASSERT_EQ(0, copysetNode.GetHash(&hash)); ASSERT_STREQ(hashValue.c_str(), hash.c_str()); ::system("rm -fr copyset_node_test/8589934594"); @@ -838,12 +838,12 @@ TEST_F(CopysetNodeTest, get_hash) { { std::string hash; - // 使用不同的copyset id,让目录不一样 + // Using different copyset IDs to make the directory different CopysetNode copysetNode(logicPoolID, copysetID + 1, conf); ASSERT_EQ(0, copysetNode.Init(defaultOptions_)); - // 生成多个有数据的文件,并且交换生成文件的顺序 + // Generate multiple files with data and exchange the order of generated files ::system("touch copyset_node_test/8589934595/data/test-1.txt"); ::system("echo \"wwwww\" > " "copyset_node_test/8589934595/data/test-1.txt"); @@ -857,7 +857,7 @@ TEST_F(CopysetNodeTest, get_hash) { ::system("echo \"abcddddddddd333\" > " "copyset_node_test/8589934595/data/test-2.txt"); - // 获取hash + // Get hash ASSERT_EQ(0, copysetNode.GetHash(&hash)); ASSERT_STREQ(hashValue.c_str(), hash.c_str()); ::system("rm -fr copyset_node_test/8589934595"); @@ -1008,7 +1008,7 @@ TEST_F(CopysetNodeTest, get_leader_status) { CopysetNode copysetNode(logicPoolID, copysetID, conf); copysetNode.SetCopysetNode(mockNode); - // 当前peer不是leader,且当前无leader + // The current peer is not a leader, and there is currently no leader { NodeStatus status; EXPECT_CALL(*mockNode, get_status(_)) @@ -1017,7 +1017,7 @@ TEST_F(CopysetNodeTest, get_leader_status) { ASSERT_FALSE(copysetNode.GetLeaderStatus(&leaderStatus)); } - // 当前peer为leader + // The current peer is the leader { NodeStatus status; status.leader_id.parse("127.0.0.1:3200:0"); @@ -1031,9 +1031,9 @@ TEST_F(CopysetNodeTest, get_leader_status) { leaderStatus.committed_index); } - // 存在leader,但不是当前peer + // There is a leader, but it is not the current peer { - // 模拟启动chunkserver + // Simulate starting chunkserver CopysetNodeManager* copysetNodeManager = &CopysetNodeManager::GetInstance(); ASSERT_EQ(0, copysetNodeManager->Init(defaultOptions_)); @@ -1044,14 +1044,14 @@ TEST_F(CopysetNodeTest, get_leader_status) { if (server.Start(port, NULL) != 0) { LOG(FATAL) << "Fail to start Server"; } - // 构造leader copyset + // Construct a leader copyset ASSERT_TRUE(copysetNodeManager->CreateCopysetNode(logicPoolID, copysetID, conf)); auto leaderNode = copysetNodeManager->GetCopysetNode(logicPoolID, copysetID); ASSERT_TRUE(nullptr != leaderNode); - // 设置预期值 + // Set expected values std::shared_ptr mockLeader = std::make_shared(logicPoolID, copysetID); @@ -1064,7 +1064,7 @@ TEST_F(CopysetNodeTest, get_leader_status) { EXPECT_CALL(*mockLeader, get_status(_)) .WillRepeatedly(SetArgPointee<0>(mockLeaderStatus)); - // 测试通过follower的node获取leader的committed index + // Test obtaining the committed index of the leader through the node of the follower NodeStatus followerStatus; followerStatus.leader_id = leader_peer; followerStatus.peer_id.parse("127.0.0.1:3201:0"); diff --git a/test/chunkserver/copyset_service_test.cpp b/test/chunkserver/copyset_service_test.cpp index 973529366b..39fcdaf4ea 100644 --- a/test/chunkserver/copyset_service_test.cpp +++ b/test/chunkserver/copyset_service_test.cpp @@ -128,7 +128,7 @@ TEST_F(CopysetServiceTest, basic) { LOG(FATAL) << "Fail to init channel to " << 
peerId.addr; } - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { brpc::Controller cntl; @@ -149,7 +149,7 @@ TEST_F(CopysetServiceTest, basic) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -169,7 +169,7 @@ TEST_F(CopysetServiceTest, basic) { response.status()); } - /* 非法参数测试 */ + /* Illegal parameter testing */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -228,7 +228,7 @@ TEST_F(CopysetServiceTest, basic) { } TEST_F(CopysetServiceTest, basic2) { - /********************* 设置初始环境 ***********************/ + /********************* Set Up Initial Environment ***********************/ CopysetNodeManager *copysetNodeManager = &CopysetNodeManager::GetInstance(); LogicPoolID logicPoolId = 2; CopysetID copysetId = 100003; @@ -269,9 +269,9 @@ TEST_F(CopysetServiceTest, basic2) { LOG(FATAL) << "Fail to init channel to " << peerId.addr; } - /********************** 跑测试cases ************************/ + /********************** Run Test Cases ************************/ - /* 测试创建一个新的 copyset */ + /* Test creating a new copyset */ CopysetService_Stub stub(&channel); { brpc::Controller cntl; @@ -298,7 +298,7 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - /* 测试创建一个重复 copyset */ + /* Test creating a duplicate copyset */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -324,7 +324,7 @@ TEST_F(CopysetServiceTest, basic2) { response.status()); } - /* 创建多个copyset */ + /* Create multiple copysets */ { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -332,7 +332,7 @@ TEST_F(CopysetServiceTest, basic2) { CopysetRequest2 request; CopysetResponse2 response; - // 准备第1个copyset + // Prepare the first copyset { Copyset *copyset; copyset = request.add_copysets(); @@ -346,7 +346,7 @@ TEST_F(CopysetServiceTest, basic2) { peer3->set_address("127.0.0.1:9042:0"); } - // 准备第2个copyset + // Prepare the second copyset { Copyset *copyset; copyset = request.add_copysets(); @@ -370,7 +370,7 @@ TEST_F(CopysetServiceTest, basic2) { // get status { - // 创建一个copyset + // Create a copyset { brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -392,11 +392,11 @@ TEST_F(CopysetServiceTest, basic2) { COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS); } - // 睡眠等待leader产生 + // Sleep waiting for leader generation ::usleep(2 * 1000 * 1000); { - // query hash为false + // query hash is false std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); @@ -432,7 +432,7 @@ TEST_F(CopysetServiceTest, basic2) { ASSERT_FALSE(response.has_hash()); } { - // query hash为true + // query hash is true std::string peerStr("127.0.0.1:9040:0"); brpc::Controller cntl; cntl.set_timeout_ms(3000); diff --git a/test/chunkserver/datastore/datastore_mock_unittest.cpp b/test/chunkserver/datastore/datastore_mock_unittest.cpp index 5910df808e..74f455162d 100644 --- a/test/chunkserver/datastore/datastore_mock_unittest.cpp +++ b/test/chunkserver/datastore/datastore_mock_unittest.cpp @@ -158,11 +158,11 @@ class CSDataStore_test } /** - * 构造初始环境 - * datastore存在两个chunk,分别为chunk1、chunk2 - * chunk1 和 chunk2的sn都为2,correctSn为0 - * chunk1存在快照文件,快照文件版本号为1 - * chunk2不存在快照文件 + * Construct initial environment + * There are two chunks in the datastore, chunk1 and chunk2 + * The sn of chunk1 and chunk2 are both 2, and correctSn is 0 + * chunk1 has a snapshot file with version number 1 + * chunk2 does not have a snapshot file */ void 
FakeEnv() { // fake DirExists @@ -267,8 +267,8 @@ class CSDataStore_test }; /** * ConstructorTest - * case:测试构造参数为空的情况 - * 预期结果:进程退出 + * Case: Test the case where the construction parameter is empty + * Expected result: Process exited */ TEST_P(CSDataStore_test, ConstructorTest) { // null param test @@ -294,8 +294,8 @@ TEST_P(CSDataStore_test, ConstructorTest) { /** * InitializeTest - * case:存在未知类型的文件 - * 预期结果:删除该文件,返回true + * Case: There is an unknown type of file + * Expected result: Delete the file and return true */ TEST_P(CSDataStore_test, InitializeTest1) { // test unknown file @@ -316,8 +316,8 @@ TEST_P(CSDataStore_test, InitializeTest1) { /** * InitializeTest - * case:存在快照文件,但是快照文件没有对应的chunk - * 预期结果:删除快照文件,返回true + * Case: There is a snapshot file, but the snapshot file does not have a corresponding chunk + * Expected result: Delete the snapshot file and return true */ TEST_P(CSDataStore_test, InitializeTest2) { // test snapshot without chunk @@ -337,8 +337,8 @@ TEST_P(CSDataStore_test, InitializeTest2) { /** * InitializeTest - * case:存在chunk文件,chunk文件存在快照文件 - * 预期结果:正常加载文件,返回true + * Case: Chunk file exists, Chunk file has snapshot file + * Expected result: Loading the file normally, returning true */ TEST_P(CSDataStore_test, InitializeTest3) { // test chunk with snapshot @@ -354,9 +354,9 @@ TEST_P(CSDataStore_test, InitializeTest3) { /** * InitializeTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * 预期结果:返回true + * Case: There is a chunk file, and there is a snapshot file in the chunk file, + * When listing, snapshots are listed before chunk files + * Expected result: Returns true */ TEST_P(CSDataStore_test, InitializeTest4) { // test snapshot founded before chunk file , @@ -379,8 +379,8 @@ TEST_P(CSDataStore_test, InitializeTest4) { /** * InitializeTest - * case:存在chunk文件,chunk文件存在两个冲突的快照文件 - * 预期结果:返回false + * Case: There is a chunk file, and there are two conflicting snapshot files in the chunk file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeTest5) { // test snapshot conflict @@ -402,8 +402,8 @@ TEST_P(CSDataStore_test, InitializeTest5) { /** * InitializeErrorTest - * case:data目录不存在,创建目录时失败 - * 预期结果:返回false + * Case: The data directory does not exist, creating the directory failed + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest1) { // dir not exist and mkdir failed @@ -421,8 +421,8 @@ TEST_P(CSDataStore_test, InitializeErrorTest1) { /** * InitializeErrorTest - * case:List目录时失败 - * 预期结果:返回false + * Case: List directory failed + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest2) { // List dir failed @@ -441,8 +441,8 @@ TEST_P(CSDataStore_test, InitializeErrorTest2) { /** * InitializeErrorTest - * case:open chunk文件的时候出错 - * 预期结果:返回false + * Case: Error opening chunk file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest3) { // test chunk open failed @@ -537,8 +537,8 @@ TEST_P(CSDataStore_test, InitializeErrorTest3) { /** * InitializeErrorTest - * case:open 快照文件的时候出错 - * 预期结果:返回false + * Case: Error opening snapshot file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest4) { // test chunk open failed @@ -548,7 +548,7 @@ TEST_P(CSDataStore_test, InitializeErrorTest4) { .WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 + // Each reinitialization will release the original resources and reload them EXPECT_CALL(*lfs_, Close(1)) .WillOnce(Return(0)); 
// open success @@ -562,7 +562,7 @@ TEST_P(CSDataStore_test, InitializeErrorTest4) { .WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 + // Each reinitialization will release the original resources and reload them EXPECT_CALL(*lfs_, Close(1)) .WillOnce(Return(0)); // open success @@ -579,7 +579,7 @@ TEST_P(CSDataStore_test, InitializeErrorTest4) { Return(0))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 + // Each reinitialization will release the original resources and reload them EXPECT_CALL(*lfs_, Close(1)) .WillOnce(Return(0)); // open success @@ -598,7 +598,7 @@ TEST_P(CSDataStore_test, InitializeErrorTest4) { .WillOnce(Return(-UT_ERRNO)); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 + // Each reinitialization will release the original resources and reload them EXPECT_CALL(*lfs_, Close(1)) .WillOnce(Return(0)); // open success @@ -621,7 +621,7 @@ TEST_P(CSDataStore_test, InitializeErrorTest4) { Return(metapagesize_))); EXPECT_FALSE(dataStore->Initialize()); - // 每次重新初始化都会释放原先的资源,重新加载 + // Each reinitialization will release the original resources and reload them EXPECT_CALL(*lfs_, Close(1)) .WillOnce(Return(0)); // open success @@ -651,10 +651,10 @@ TEST_P(CSDataStore_test, InitializeErrorTest4) { /** * InitializeErrorTest - * case:存在chunk文件,chunk文件存在snapshot文件, - * List的时候snapshot先于chunk文件被list - * open chunk文件的时候出错 - * 预期结果:返回false + * Case: There is a chunk file, and there is a snapshot file in the chunk file, + * When listing, snapshots are listed before chunk files + * Error opening chunk file + * Expected result: returns false */ TEST_P(CSDataStore_test, InitializeErrorTest5) { // test snapshot founded before chunk file , @@ -676,8 +676,8 @@ TEST_P(CSDataStore_test, InitializeErrorTest5) { /** * Test - * case:chunk 不存在 - * 预期结果:创建chunk文件,并成功写入数据 + * Case: chunk does not exist + * Expected result: Create chunk file and successfully write data */ TEST_P(CSDataStore_test, WriteChunkTest1) { // initialize @@ -694,7 +694,7 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { string chunk3Path = string(baseDir) + "/" + FileNameOperator::GenerateChunkFileName(id); - // 如果sn为0,返回InvalidArgError + // If sn is 0, returns InvalidArgError EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->WriteChunk(id, 0, buf, @@ -757,8 +757,8 @@ TEST_P(CSDataStore_test, WriteChunkTest1) { /** * WriteChunkTest - * case:chunk存在,请求sn小于chunk的sn - * 预期结果:拒绝写入,返回BackwardRequestError + * Case: chunk exists, request sn smaller than chunk's sn + * Expected result: Refused writing, returned BackwardRequestError */ TEST_P(CSDataStore_test, WriteChunkTest2) { // initialize @@ -814,8 +814,8 @@ TEST_P(CSDataStore_test, WriteChunkTest2) { /** * WriteChunkTest - * case:chunk存在,请求sn小于chunk的correctedSn - * 预期结果:拒绝写入,返回BackwardRequestError + * Case: chunk exists, request correctedSn with sn less than chunk + * Expected result: Refused writing, returned BackwardRequestError */ TEST_P(CSDataStore_test, WriteChunkTest3) { // initialize @@ -876,9 +876,9 @@ TEST_P(CSDataStore_test, WriteChunkTest3) { /** * WriteChunkTest - * case:chunk存在,请求sn等于chunk的sn且不小于correctSn - * chunk不存在快照 - * 预期结果:直接写数据到chunk文件 + * Case: chunk exists, request sn to be equal to the SN of the chunk and not less than correctSn + * chunk does not have a snapshot + * Expected result: Directly write data to chunk file */ TEST_P(CSDataStore_test, WriteChunkTest4) { // initialize @@ -957,9 +957,9 @@ TEST_P(CSDataStore_test, WriteChunkTest4) { /** * WriteChunkTest - * 
case:chunk存在,请求sn大于chunk的sn,等于correctSn, - * chunk不存在快照 - * 预期结果:会更新metapage,然后写数据到chunk文件 + * Case: chunk exists, request sn is greater than the sn of the chunk, equal to correctSn, + * chunk does not have a snapshot + * Expected result: Metapage will be updated and data will be written to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest6) { // initialize @@ -1011,10 +1011,10 @@ TEST_P(CSDataStore_test, WriteChunkTest6) { /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn以及correctSn, - * chunk不存在快照、 - * 预期结果:会创建快照文件,更新metapage, - * 写数据时先cow到snapshot,再写chunk文件 + * Case: chunk exists, request sn greater than Chunk's sn and correctSn, + * chunk does not have a snapshot + * Expected result: A snapshot file will be created, and the metapage will be updated, + * When writing data, first perform a Copy-On-Write operation to the snapshot, and then write to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest7) { // initialize @@ -1077,7 +1077,7 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); - // 再次写同一个block的数据,不再进行cow,而是直接写入数据 + // Write data for the same block again, no longer co w, but directly write the data EXPECT_CALL(*lfs_, Write(3, Matcher(_), metapagesize_ + offset, length)) .Times(1); @@ -1089,7 +1089,7 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { length, nullptr)); - // sn - 1 < chunk.sn , 返回 BackwardRequestError + // sn - 1 < chunk. sn, returns BackwardRequestError EXPECT_EQ(CSErrorCode::BackwardRequestError, dataStore->WriteChunk(id, sn - 1, @@ -1111,9 +1111,9 @@ TEST_P(CSDataStore_test, WriteChunkTest7) { /** * WriteChunkTest - * case:chunk存在,请求sn等于chunk的sn且不小于correctSn - * chunk存在快照 - * 预期结果:先cow到snapshot,再写chunk文件 + * Case: chunk exists, request sn to be equal to the SN of the chunk and not less than correctSn + * chunk has a snapshot + * Expected result: When writing data, first perform a Copy-On-Write operation to the snapshot, and then write to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest9) { // initialize @@ -1165,9 +1165,9 @@ TEST_P(CSDataStore_test, WriteChunkTest9) { /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn,等于correctSn - * chunk存在快照 - * 预期结果:更新metapage,然后写chunk文件 + * Case: chunk exists, request sn is greater than the sn of the chunk, equal to correctSn + * chunk has a snapshot + * Expected result: Update the metapage and write the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest10) { // initialize @@ -1220,9 +1220,9 @@ TEST_P(CSDataStore_test, WriteChunkTest10) { /** * WriteChunkTest - * case:chunk存在,请求sn大于chunk的sn和correctSn - * chunk存在快照,snapsnGetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -1352,7 +1352,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case2:chunk存在,且是clone chunk,写入区域之前已写过 + // Case2: chunk exists and is a clone chunk, which has been written before writing to the region { LOG(INFO) << "case 3"; id = 3; // not exist @@ -1372,7 +1372,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { offset, length, nullptr)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -1380,7 +1380,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case3:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case3: chunk exists and is a clone chunk. 
Some areas have been written, while others have not { LOG(INFO) << "case 4"; id = 3; // not exist @@ -1389,8 +1389,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) area has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1405,7 +1405,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { offset, length, nullptr)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); @@ -1413,7 +1413,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case4:遍写整个chunk + // Case4: Overwrite the entire chun { LOG(INFO) << "case 5"; id = 3; // not exist @@ -1422,8 +1422,8 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { std::unique_ptr buf(new char[length]); - // [blocksize_, 4 * blocksize_)区域已写过 - // [0, metapagesize_)为metapage + // The [blocksize_, 4 * blocksize_) area has been written + // [0, metapagesize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1438,7 +1438,7 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { offset, length, nullptr)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); @@ -1457,15 +1457,15 @@ TEST_P(CSDataStore_test, WriteChunkTest13) { /** * WriteChunkTest - * 写clone chunk,模拟恢复 - * case1:clone chunk 存在,snchunk.sn,sn==chunk.correctedsn - * 预期结果2:写入数据并更新bitmap,更新chunk.sn为sn - * case3:clone chunk存在,sn==chunk.sn,sn==chunk.correctedsn - * 预期结果3:写入数据并更新bitmap - * case4:clone chunk 存在,sn>chunk.sn, sn>chunk.correctedsn - * 预期结果4:返回StatusConflictError + * Write clone chunk to simulate recovery + * Case1: clone chunk exists, snchunk.sn, sn==chunk.correctedsn + * Expected result 2: Write data and update bitmap, update chunk.sn to sn + * Case3: clone chunk exists, sn==chunk.sn, sn==chunk.correctedsn + * Expected result 3: Write data and update bitmap + * Case4: clone chunk exists, sn>chunk.sn, sn>chunk.correctedsn + * Expected result 4: Returning StatusConflictError */ TEST_P(CSDataStore_test, WriteChunkTest14) { // initialize @@ -1480,7 +1480,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -1518,7 +1518,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case1:clone chunk存在 + // Case1: clone chunk exists { LOG(INFO) << "case 1"; // sn == chunk.sn, sn < chunk.correctedSn @@ -1543,7 +1543,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { nullptr)); } - // case2:chunk存在,且是clone chunk, + // Case2: chunk exists and is a clone chunk, { LOG(INFO) << "case 2"; id = 3; @@ -1565,7 +1565,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { offset, length, nullptr)); - // 检查paste后chunk的状态 + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); 
ASSERT_EQ(3, info.correctedSn); @@ -1576,7 +1576,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn == correctedsn { LOG(INFO) << "case 3"; @@ -1585,8 +1585,8 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { std::unique_ptr buf(new char[length]); - // [2 * blocksize_, 4 * blocksize_)区域已写过 - // [0, blocksize_)为metapage + // The [2 * blocksize_, 4 * blocksize_) area has been written + // [0, blocksize_) is the metapage EXPECT_CALL(*lfs_, Write(4, Matcher(_), offset + metapagesize_, length)) .Times(1); @@ -1601,7 +1601,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { offset, length, nullptr)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1612,12 +1612,12 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case3:chunk存在,且是clone chunk + // Case3: chunk exists and is a clone chunk // sn > chunk.sn;sn > correctedsn { LOG(INFO) << "case 4"; sn = 4; - // 不会写数据 + // No data will be written EXPECT_CALL(*lfs_, Write(4, Matcher(_), _, _)) .Times(0); @@ -1630,7 +1630,7 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { offset, length, nullptr)); - // chunk的状态不变 + // The state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(3, info.curSn); ASSERT_EQ(3, info.correctedSn); @@ -1654,12 +1654,12 @@ TEST_P(CSDataStore_test, WriteChunkTest14) { /** * WriteChunkTest - * case:chunk存在, + * Case: chunk exists, * sn==chunk.sn * sn>chunk.correctedSn * chunk.snchunk.sn - * sn>chunk.correctedSn - * chunk.sn==snap.sn - * chunk存在快照 - * 预期结果:先cow到snapshot,再写chunk文件 + * Case: chunk exists, + * sn>chunk.sn + * sn>chunk.correctedSn + * chunk.sn==snap.sn + * chunk has a snapshot + * Expected result: When writing data, first perform a Copy-On-Write operation to the snapshot, and then write to the chunk file */ TEST_P(CSDataStore_test, WriteChunkTest16) { // initialize @@ -1777,9 +1777,9 @@ TEST_P(CSDataStore_test, WriteChunkTest16) { } /** - * WriteChunkTest 异常测试 - * case:创建快照文件时出错 - * 预期结果:写失败,不会改变当前chunk状态 + * WriteChunkTest exception test + * Case: Error creating snapshot file + * Expected result: Write failed and will not change the current chunk state */ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { // initialize @@ -1862,10 +1862,10 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest1) { } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage失败 - * 预期结果:写失败,产生快照文件,但是chunk版本号不会改变 - * 再次写入,不会生成新的快照文件 + * WriteChunkTest exception test + * Case: Snapshot file created successfully, updating the metapage failed + * Expected result: Write failed, a snapshot file is generated, but the chunk version number will not change + * Writing again will not generate a new snapshot file */ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { // initialize @@ -1926,10 +1926,10 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest2) { } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap未发生变化,再次写入,仍会进行cow + * WriteChunkTest exception test + * Case: Snapshot file created successfully, metapage updated successfully, cow failed + * Expected result: Write failed, snapshot file generated, chunk version number changed, + * The bitmap of the snapshot has not changed; 
writing again will still trigger cow */ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { // initialize @@ -2024,7 +2024,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); - // 再次写入仍会cow + // Writing again will still trigger cow // will copy on write LOG(INFO) << "case 4"; EXPECT_CALL(*lfs_, Read(3, NotNull(), metapagesize_ + offset, length)) @@ -2062,10 +2062,10 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest3) { } /** - * WriteChunkTest 异常测试 - * case:创建快照文件成功,更新metapage成功,cow成功,写数据失败 - * 预期结果:写失败,产生快照文件,chunk版本号发生变更, - * 快照的bitmap发生变化,再次写入,直接写chunk文件 + * WriteChunkTest exception test + * Case: Snapshot file created successfully, metapage updated successfully, cow succeeded, writing data failed + * Expected result: Write failed, snapshot file generated, chunk version number changed, + * The bitmap of the snapshot has changed; writing again will write directly to the chunk file */ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { // initialize @@ -2122,7 +2122,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { offset, length, nullptr)); - // 再次写入直接写chunk文件 + // Writing again goes directly to the chunk file // will write data EXPECT_CALL(*lfs_, Write(3, Matcher(_), metapagesize_ + offset, length)) .Times(1); @@ -2147,8 +2147,8 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest4) { /** * WriteChunkTest - * case:chunk 不存在 - * 预期结果:创建chunk文件的时候失败 + * Case: chunk does not exist + * Expected result: Failed to create chunk file */ TEST_P(CSDataStore_test, WriteChunkErrorTest5) { // initialize @@ -2266,13 +2266,13 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest5) { /* * WriteChunkErrorTest - * 所写chunk为clone chunk - * case1:请求location过长,导致metapage size超出page size - * 预期结果1:create clone chunk失败 - * case2:写数据时失败 - * 预期结果2:返回InternalError,chunk状态不变 - * case3:更新metapage时失败 - * 预期结果3:返回InternalError,chunk状态不变 + * The chunk written is a clone chunk + * Case1: The request location is too long, causing the metapage size to exceed the page size + * Expected result 1: Create clone chunk failed + * Case2: Failed to write data + * Expected result 2: InternalError returned, chunk status remains unchanged + * Case3: Failed to update metapage + * Expected result 3: InternalError returned, chunk status remains unchanged */ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { // initialize @@ -2287,7 +2287,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { char buf[length]; // NOLINT memset(buf, 0, sizeof(buf)); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { string longLocation(kLocationLimit+1, 'a'); EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->CreateCloneChunk(id, sn, correctedSn, chunksize_, longLocation)); } - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -2327,7 +2327,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -2346,12 +2346,12 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { offset, length, nullptr)); - // 检查paste后chunk的状态 + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case2:更新metapage时失败 + // Case2: Failed to update metapage { id = 3; // not exist offset = blocksize_; @@ -2370,7 +2370,7 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { offset, 
length, nullptr)); - // 检查paste后chunk的状态 + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); @@ -2388,8 +2388,8 @@ TEST_P(CSDataStore_test, WriteChunkErrorTest6) { /** * ReadChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest1) { // initialize @@ -2420,8 +2420,8 @@ TEST_P(CSDataStore_test, ReadChunkTest1) { /** * ReadChunkTest - * case:chunk存在,读取区域超过chunk大小或者offset和length未对齐 - * 预期结果:返回InvalidArgError错误码 + * Case: chunk exists, reading area exceeds chunk size or offset and length are not aligned + * Expected result: InvalidArgError error code returned */ TEST_P(CSDataStore_test, ReadChunkTest2) { // initialize @@ -2470,8 +2470,8 @@ TEST_P(CSDataStore_test, ReadChunkTest2) { /** * ReadChunkTest - * case:正常读取存在的chunk - * 预期结果:读取成功 + * Case: Normal reading of existing chunks + * Expected result: read successfully */ TEST_P(CSDataStore_test, ReadChunkTest3) { // initialize @@ -2505,13 +2505,13 @@ TEST_P(CSDataStore_test, ReadChunkTest3) { /** * ReadChunkTest - * 读取 clone chunk - * case1:读取区域未被写过 - * 预期结果:返回PageNerverWrittenError - * case2:读取区域部分被写过 - * 预期结果:返回PageNerverWrittenError - * case3:读取区域已被写过 - * 预期结果:返回Success,数据成功写入 + * Read clone chunk + * Case1: The read area has not been written + * Expected result: PageNerverWrittenError returned + * Case2: The read area part has been written + * Expected result: PageNerverWrittenError returned + * Case3: The read area has been written + * Expected result: Success returned, data successfully written */ TEST_P(CSDataStore_test, ReadChunkTest4) { // initialize @@ -2551,7 +2551,7 @@ TEST_P(CSDataStore_test, ReadChunkTest4) { chunksize_, location)); - // case1: 读取未写过区域 + // Case1: Read unwritten area off_t offset = 1 * blocksize_; size_t length = blocksize_; char buf[2 * length]; // NOLINT @@ -2565,7 +2565,7 @@ TEST_P(CSDataStore_test, ReadChunkTest4) { offset, length)); - // case2: 读取区域部分被写过 + // Case2: The read area part has been written offset = 0; length = 2 * blocksize_; EXPECT_CALL(*lfs_, Read(_, _, _, _)) @@ -2577,7 +2577,7 @@ TEST_P(CSDataStore_test, ReadChunkTest4) { offset, length)); - // case3: 读取区域已写过 + // Case3: The read area has been written offset = 0; length = blocksize_; EXPECT_CALL(*lfs_, Read(4, NotNull(), offset + metapagesize_, length)) @@ -2601,8 +2601,8 @@ TEST_P(CSDataStore_test, ReadChunkTest4) { /** * ReadChunkErrorTest - * case:读chunk文件时出错 - * 预期结果:读取失败,返回InternalError + * Case: Error reading chunk file + * Expected result: Read failed, returned InternalError */ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { // initialize @@ -2636,8 +2636,8 @@ TEST_P(CSDataStore_test, ReadChunkErrorTest1) { /** * ReadSnapshotChunkTest - * case:chunk不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { // initialize @@ -2669,8 +2669,8 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest1) { /** * ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于chunk版本号 - * 预期结果:读chunk的数据 + * Case: chunk exists, request version number equal to Chunk version number + * Expected result: Read chunk data */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { // initialize @@ -2731,8 +2731,8 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest2) { /** * 
ReadSnapshotChunkTest - * case:chunk存在,请求版本号等于snapshot版本号 - * 预期结果:读快照的数据 + * Case: chunk exists, request version number equal to snapshot version number + * Expected result: Read data from snapshot */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { // initialize @@ -2810,8 +2810,8 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest3) { /** * ReadSnapshotChunkTest - * case:chunk存在,但是请求的版本号不存在 - * 预期结果:返回ChunkNotExistError错误码 + * Case: chunk exists, but the requested version number does not exist + * Expected result: ChunkNotExistError error code returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { // initialize @@ -2843,8 +2843,8 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkTest4) { /** * ReadSnapshotChunkErrorTest - * case:读快照时失败 - * 预期结果:返回InternalError + * Case: Failed to read snapshot + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { // initialize @@ -2919,8 +2919,8 @@ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest1) { /** * ReadSnapshotChunkErrorTest - * case:chunk存在,请求版本号等于chunk版本号,读数据时失败 - * 预期结果:返回InternalError + * Case: chunk exists, request version number is equal to Chunk version number, failed while reading data + * Expected result: InternalError returned */ TEST_P(CSDataStore_test, ReadSnapshotChunkErrorTest2) { // initialize @@ -3010,8 +3010,8 @@ TEST_P(CSDataStore_test, ReadChunkMetaDataTest2) { /** * DeleteChunkTest - * case:chunk不存在 - * 预期结果:返回成功 + * Case: chunk does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkTest1) { // initialize @@ -3035,8 +3035,8 @@ TEST_P(CSDataStore_test, DeleteChunkTest1) { /** * DeleteChunkTest - * case:chunk存在快照文件 - * 预期结果:返回Success, chunk被删除,快照被删除 + * Case: Chunk has a snapshot file present + * Expected result: Success returned, chunk deleted, snapshot deleted */ TEST_P(CSDataStore_test, DeleteChunkTest2) { // initialize @@ -3063,8 +3063,8 @@ TEST_P(CSDataStore_test, DeleteChunkTest2) { } /** - * case:chunk存在,快照文件不存在 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkTest3) { // initialize @@ -3091,11 +3091,11 @@ TEST_P(CSDataStore_test, DeleteChunkTest3) { /** * DeleteChunkTest - * chunk存在,快照文件不存在 - * case1: snchunkinfo.sn - * 预期结果2:返回成功 + * chunk exists, snapshot file does not exist + * Case1: snchunkinfo.sn + * Expected result 2: Success returned */ TEST_P(CSDataStore_test, DeleteChunkTest4) { // initialize @@ -3136,8 +3136,8 @@ TEST_P(CSDataStore_test, DeleteChunkTest4) { /** * DeleteChunkErrorTest - * case:chunk存在,快照文件不存在,recyclechunk时出错 - * 预期结果:返回成功 + * Case: chunk exists, snapshot file does not exist, error occurred during recyclechunk + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { // initialize @@ -3163,8 +3163,8 @@ TEST_P(CSDataStore_test, DeleteChunkErrorTest1) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk不存在 - * 预期结果:返回成功 + * Case: chunk does not exist + * Expected result: returned successfully */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) { // initialize @@ -3185,19 +3185,19 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest1) { .Times(1); } -// 对于DeleteSnapshotChunkOrCorrectSn来说,内部主要有两个操作 -// 一个是删除快照文件,一个是修改correctedSn -// 当存在快照文件时,fileSn>=chunk的sn是判断是否要删除快照的唯一条件 -// 对于correctedSn来说,fileSn大于chunk的sn以及correctedSn是判断 -// 是否要修改correctedSn的唯一条件 +// For DeleteSnapshotChunkOrCorrectSn, there are two main internal operations +// One is 
to delete the snapshot file, and the other is to modify correctedSn +// When a snapshot file exists, fileSn >= chunk's sn is the only condition used to decide whether to delete the snapshot +// For correctedSn, fileSn being greater than both the chunk's sn and correctedSn is the only +// condition used to decide whether correctedSn should be modified /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn >= chunk的sn - * fileSn == chunk的correctedSn - * chunk.sn>snap.sn - * 预期结果:删除快照,不会修改correctedSn,返回成功 + * Case: chunk exists, snapshot exists + * fileSn >= chunk's sn + * fileSn == chunk's correctedSn + * chunk.sn>snap.sn + * Expected result: Delete snapshot without modifying correctedSn, return success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { // initialize @@ -3236,10 +3236,10 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest2) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn < chunk的sn - * 此时无论correctSn为何值都不会修改correctedSn - * 预期结果:返回成功,不会删除快照,不会修改correctedSn + * Case: chunk exists, snapshot exists + * fileSn < chunk's sn + * At this point, regardless of the value of correctSn, correctedSn will not be modified + * Expected result: Success returned, snapshot will not be deleted, correctedSn will not be modified */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { // initialize @@ -3267,8 +3267,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { EXPECT_EQ(CSErrorCode::BackwardRequestError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, fileSn)); - // 下则用例用于补充DeleteSnapshotChunkOrCorrectSnTest2用例中 - // 当 fileSn == sn 时的边界情况 + // The following case supplements DeleteSnapshotChunkOrCorrectSnTest2 with + // the boundary situation when fileSn == sn // fileSn == sn // fileSn > correctedSn fileSn = 3; @@ -3293,9 +3293,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest3) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn > chunk的sn以及correctedSn - * 预期结果:删除快照,并修改correctedSn,返回成功 + * Case: chunk exists, snapshot exists + * fileSn > chunk's sn and correctedSn + * Expected result: Delete the snapshot and modify correctedSn, returning success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { // initialize @@ -3327,9 +3327,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest4) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在 - * fileSn <= chunk的sn或correctedSn - * 预期结果:不会修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn <= chunk's sn or correctedSn + * Expected result: correctedSn will not be modified, returning success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { // initialize @@ -3357,9 +3357,9 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest5) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在 - * fileSn > chunk的sn及correctedSn - * 预期结果:修改correctedSn,返回成功 + * Case: chunk exists, snapshot does not exist + * fileSn > chunk's sn and correctedSn + * Expected result: Modify correctedSn and return success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { // initialize @@ -3387,8 +3387,8 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest6) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot不存在,chunk为clone chunk - * 预期结果:返回StatusConflictError + * Case: chunk exists, snapshot does not exist, chunk is clone chunk + * Expected result: Returning 
StatusConflictError */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { // initialize @@ -3427,7 +3427,7 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { chunksize_, location)); - // 无论correctedSn为多少,都返回StatusConflictError + // Returns StatusConflictError regardless of the number of correctedSn EXPECT_EQ(CSErrorCode::StatusConflictError, dataStore->DeleteSnapshotChunkOrCorrectSn(id, 1)); EXPECT_EQ(CSErrorCode::StatusConflictError, @@ -3451,11 +3451,11 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest7) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn > chunk的sn - * fileSn > chunk的correctedSn + * Case: chunk exists, snapshot exists + * fileSn > chunk's sn + * fileSn > chunk's correctedSn * chunk.sn==snap.sn - * 预期结果:删除快照,不会修改correctedSn,返回成功 + * Expected result: Delete snapshot without modifying correctedSn, return success */ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { // initialize @@ -3503,11 +3503,11 @@ TEST_P(CSDataStore_test, DeleteSnapshotChunkOrCorrectSnTest8) { /** * DeleteSnapshotChunkOrCorrectSnTest - * case:chunk存在,snapshot存在 - * fileSn == chunk的sn - * fileSn == chunk的correctedSn - * chunk.sn bitmap = make_shared(chunksize_ / blocksize_); FakeEncodeChunk(chunk3MetaPage, correctedSn, sn, bitmap, location); - // case1:输入错误的参数 + // Case1: Input incorrect parameters { // size != chunksize EXPECT_EQ(CSErrorCode::InvalidArgError, @@ -3679,7 +3679,7 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { "")); } - // case2:指定的chunk不存在,指定chunksize与配置一致 + // Case2: The specified chunk does not exist, the specified chunksize is consistent with the configuration { // create new chunk and open it string chunk3Path = string(baseDir) + "/" + @@ -3703,7 +3703,7 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3714,7 +3714,7 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case3:指定的chunk存在,参数与原chunk一致 + // Case3: The specified chunk exists, and the parameters are consistent with the original chunk { EXPECT_EQ(CSErrorCode::Success, dataStore->CreateCloneChunk(id, @@ -3722,7 +3722,7 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3733,31 +3733,31 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case4:指定的chunk存在,参数与原chunk不一致 - // 返回ChunkConflictError,但是不会改变原chunk信息 + // Case4: The specified chunk exists, and the parameters are inconsistent with the original chunk + // Returns ChunkConflictError, but does not change the original chunk information { - // 版本不一致 + //Version inconsistency EXPECT_EQ(CSErrorCode::ChunkConflictError, dataStore->CreateCloneChunk(id, sn + 1, correctedSn, chunksize_, location)); - // correctedSn不一致 + // Inconsistent correctedSn EXPECT_EQ(CSErrorCode::ChunkConflictError, dataStore->CreateCloneChunk(id, sn, correctedSn + 1, chunksize_, location)); - // location不一致 + // Inconsistent location EXPECT_EQ(CSErrorCode::ChunkConflictError, dataStore->CreateCloneChunk(id, sn, correctedSn, 
chunksize_, "temp")); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3768,8 +3768,8 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case5:指定的chunk存在,指定chunksize与配置不一致 - // 返回InvalidArgError,但是不会改变原chunk信息 + // Case5: The specified chunk exists, but the specified chunksize is inconsistent with the configuration + // Returns InvalidArgError, but does not change the original chunk information { EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->CreateCloneChunk(id, @@ -3777,7 +3777,7 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { correctedSn, chunksize_ + metapagesize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(id, info.chunkId); ASSERT_EQ(sn, info.curSn); @@ -3788,9 +3788,9 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case6:已存在chunk,chunk不是clone chunk + // Case6: Chunk already exists, chunk is not a clone chunk { - // location 为空 + // location is empty EXPECT_EQ(CSErrorCode::InvalidArgError, dataStore->CreateCloneChunk(1, // id 2, // sn @@ -3798,7 +3798,7 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { chunksize_, "")); - // location 不为空 + // location is not empty EXPECT_EQ(CSErrorCode::ChunkConflictError, dataStore->CreateCloneChunk(1, // id 2, // sn @@ -3819,8 +3819,8 @@ TEST_P(CSDataStore_test, CreateCloneChunkTest) { /** * CreateCloneChunkErrorTest - * case:chunk不存在,调chunkFile->Open的时候失败 - * 预期结果:创建clone chunk失败 + * Case: chunk does not exist, failed when calling chunkFile->Open + * Expected result: Failed to create clone chunk */ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) { // initialize @@ -3845,7 +3845,7 @@ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) { correctedSn, chunksize_, location)); - // 检查生成的clone chunk信息 + // Check the generated clone chunk information ASSERT_EQ(CSErrorCode::ChunkNotExistError, dataStore->GetChunkInfo(id, &info)); @@ -3859,20 +3859,20 @@ TEST_P(CSDataStore_test, CreateCloneChunkErrorTest) { /** * PasteChunkTedt - * case1:chunk 不存在 - * 预期结果1:返回ChunkNotExistError - * case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐 - * 预期结果2:返回InvalidArgError - * case3:chunk存在,但不是clone chunk - * 预期结果3:返回成功 - * case4:chunk存在,且是clone chunk,写入区域之前未写过 - * 预期结果4:写入数据并更新bitmap - * case5:chunk存在,且是clone chunk,写入区域之前已写过 - * 预期结果5:无数据写入,且不会更新bitmap - * case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过 - * 预期结果6:只写入未写过数据,并更新bitmap - * case7:遍写整个chunk - * 预期结果7:数据写入未写过区域,然后clone chunk会被转为普通chunk + * Case1: Chunk does not exist + * Expected result 1: ChunkNotExistError returned + * Case2: chunk exists, requested offset exceeds chunk file size or offset length is not aligned + * Expected result 2: InvalidArgError returned + * Case3: chunk exists, but not clone chunk + * Expected result 3: Success returned + * Case4: chunk exists and is a clone chunk, which has not been written before writing to the region + * Expected result 4: Write data and update bitmap + * Case5: chunk exists and is a clone chunk, which has been written before writing to the region + * Expected result 5: No data written and Bitmap will not be updated + * Case6: chunk exists and is a clone chunk. 
Some areas have been written, while others have not + * Expected result 6: Only write unwritten data and update bitmap + * Case7: Overwrite the entire chunk + * Expected result 7: Data is written to an unwritten area, and then the clone chunk will be converted to a regular chunk */ TEST_P(CSDataStore_test, PasteChunkTest1) { // initialize @@ -3887,7 +3887,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { char* buf = new char[length]; memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -3918,7 +3918,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { location)); } - // case1:chunk 不存在 + // Case1: chunk does not exist { id = 4; // not exist ASSERT_EQ(CSErrorCode::ChunkNotExistError, @@ -3928,7 +3928,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { length)); } - // case2:chunk存在,请求偏移超过chunk文件大小或偏移长度未对齐 + // Case2: chunk exists, requested offset exceeds chunk file size or offset length is not aligned { id = 3; // not exist offset = chunksize_; @@ -3953,12 +3953,12 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { length)); } - // case3:chunk存在,但不是clone chunk + // Case3: chunk exists, but not clone chunk { EXPECT_CALL(*lfs_, Write(_, Matcher(NotNull()), _, _)) .Times(0); - // 快照不存在 + // The snapshot does not exist id = 2; offset = 0; length = blocksize_; @@ -3968,7 +3968,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { offset, length)); - // 快照存在 + // Snapshot exists id = 1; offset = 0; ASSERT_EQ(CSErrorCode::Success, @@ -3978,7 +3978,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { length)); } - // case4:chunk存在,且是clone chunk,写入区域之前未写过 + // Case4: chunk exists and is a clone chunk, which has not been written before writing to the region { id = 3; // not exist offset = blocksize_; @@ -3995,7 +3995,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { buf, offset, length)); - // 检查paste后chunk的状态 + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); @@ -4003,7 +4003,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case5:chunk存在,且是clone chunk,写入区域之前已写过 + // Case5: chunk exists and is a clone chunk, which has been written before writing to the region { id = 3; // not exist offset = blocksize_; @@ -4019,19 +4019,19 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { buf, offset, length)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(1, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(1)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); } - // case6:chunk存在,且是clone chunk,部分区域已写过,部分未写过 + // Case6: chunk exists and is a clone chunk. 
Some areas have been written, while others have not { id = 3; // not exist offset = 0; length = 4 * blocksize_; - // [2 * blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage + // [2 * blocksize_, 4 * blocksize_) area has been written, [0, blocksize_) is a metapage EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_, blocksize_)) .Times(1); @@ -4043,19 +4043,19 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { .Times(1); ASSERT_EQ(CSErrorCode::Success, dataStore->PasteChunk(id, buf, offset, length)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(4, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(4)); } - // case7:遍写整个chunk + // Case7: Overwrite the entire chunk { id = 3; // not exist offset = 0; length = chunksize_; - // [blocksize_, 4 * blocksize_)区域已写过,[0, blocksize_)为metapage + // [blocksize_, 4 * blocksize_) area has been written, [0, blocksize_) is a metapage EXPECT_CALL(*lfs_, Write(4, Matcher(NotNull()), metapagesize_ + 4 * blocksize_, @@ -4069,7 +4069,7 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { buf, offset, length)); - // paste后,chunk的状态不变 + // After paste, the state of the chunk remains unchanged ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); @@ -4088,10 +4088,10 @@ TEST_P(CSDataStore_test, PasteChunkTest1) { /* * PasteChunkErrorTest - * case1:写数据时失败 - * 预期结果1:返回InternalError,chunk状态不变 - * case2:更新metapage时失败 - * 预期结果2:返回InternalError,chunk状态不变 + * Case1: Failed to write data + * Expected result 1: InternalError returned, chunk status remains unchanged + * Case2: Failed to update metapage + * Expected result 2: InternalError returned, chunk status remains unchanged */ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { // initialize @@ -4106,7 +4106,7 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { char* buf = new char[length]; // NOLINT memset(buf, 0, length); CSChunkInfo info; - // 创建 clone chunk + // Create clone chunk { char chunk3MetaPage[metapagesize_]; // NOLINT(runtime/arrays) memset(chunk3MetaPage, 0, sizeof(chunk3MetaPage)); @@ -4136,7 +4136,7 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { chunksize_, location)); } - // case1:写数据时失败 + // Case1: Failed to write data { id = 3; // not exist offset = blocksize_; @@ -4153,12 +4153,12 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { buf, offset, length)); - // 检查paste后chunk的状态 + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); } - // case2:更新metapage时失败 + // Case2: Failed to update metapage { id = 3; // not exist offset = blocksize_; @@ -4175,7 +4175,7 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { buf, offset, length)); - // 检查paste后chunk的状态 + // Check the status of chunk after paste ASSERT_EQ(CSErrorCode::Success, dataStore->GetChunkInfo(id, &info)); ASSERT_EQ(true, info.isClone); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); @@ -4193,7 +4193,7 @@ TEST_P(CSDataStore_test, PasteChunkErrorTest1) { } /* - * chunk不存在 + * Chunk does not exist */ TEST_P(CSDataStore_test, GetHashErrorTest1) { // initialize @@ -4219,7 +4219,7 @@ TEST_P(CSDataStore_test, GetHashErrorTest1) { } /* - * read报错 + * Read error */ TEST_P(CSDataStore_test, 
GetHashErrorTest2) { // initialize @@ -4247,7 +4247,7 @@ TEST_P(CSDataStore_test, GetHashErrorTest2) { } /* - * 获取datastore状态测试 + * Obtain Datastore Status Test */ TEST_P(CSDataStore_test, GetStatusTest) { // initialize diff --git a/test/chunkserver/datastore/file_helper_unittest.cpp b/test/chunkserver/datastore/file_helper_unittest.cpp index 0f7ca39b95..25eb7550ff 100644 --- a/test/chunkserver/datastore/file_helper_unittest.cpp +++ b/test/chunkserver/datastore/file_helper_unittest.cpp @@ -64,11 +64,11 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { vector chunkFiles; vector snapFiles; - // case1:List失败,返回-1 + // Case1: List failed, returned -1 EXPECT_CALL(*fs_, List(_, _)) .WillOnce(Return(-1)); ASSERT_EQ(-1, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); - // 如果返回ENOENT错误,直接返回成功 + // If an ENOENT error is returned, success is returned directly EXPECT_CALL(*fs_, List(_, _)) .WillOnce(Return(-ENOENT)); ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); @@ -77,7 +77,7 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { string chunk1 = "chunk_1"; string chunk2 = "chunk_2"; string snap1 = "chunk_1_snap_1"; - string other = "chunk_1_S"; // 非法文件名 + string other = "chunk_1_S"; // Illegal file name files.emplace_back(chunk1); files.emplace_back(chunk2); files.emplace_back(snap1); @@ -86,7 +86,7 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { .WillRepeatedly(DoAll(SetArgPointee<1>(files), Return(0))); - // case2:List成功,返回chunk文件和snapshot文件 + // Case2: List successful, returning chunk file and snapshot file ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, &chunkFiles, &snapFiles)); ASSERT_EQ(2, chunkFiles.size()); ASSERT_STREQ(chunk1.c_str(), chunkFiles[0].c_str()); @@ -94,7 +94,7 @@ TEST_F(FileHelper_MockTest, ListFilesTest) { ASSERT_EQ(1, snapFiles.size()); ASSERT_STREQ(snap1.c_str(), snapFiles[0].c_str()); - // case3:允许vector为空指针 + // Case3: Allow vector to be a null pointer ASSERT_EQ(0, fileHelper_->ListFiles(baseDir, nullptr, nullptr)); } diff --git a/test/chunkserver/datastore/filepool_mock_unittest.cpp b/test/chunkserver/datastore/filepool_mock_unittest.cpp index f9fc0502e1..3f82c97d28 100644 --- a/test/chunkserver/datastore/filepool_mock_unittest.cpp +++ b/test/chunkserver/datastore/filepool_mock_unittest.cpp @@ -78,7 +78,7 @@ class CSChunkfilePoolMockTest : public testing::Test { void TearDown() {} static Json::Value GenerateMetaJson(bool hasBlockSize = false) { - // 正常的meta文件的json格式 + // JSON format for normal meta files FilePoolMeta meta; meta.chunkSize = CHUNK_SIZE; meta.metaPageSize = PAGE_SIZE; @@ -161,7 +161,7 @@ class CSChunkfilePoolMockTest : public testing::Test { std::shared_ptr lfs_; }; -// PersistEnCodeMetaInfo接口的异常测试 +// Exception testing for PersistEnCodeMetaInfo interface TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { FilePoolMeta meta; meta.chunkSize = CHUNK_SIZE; @@ -169,7 +169,7 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { meta.hasBlockSize = false; meta.filePoolPath = poolDir; - // open失败 + // open failed { EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) .WillOnce(Return(-1)); @@ -180,7 +180,7 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); } - // open成功,write失败 + // open successful, write failed { EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) .WillOnce(Return(1)); @@ -191,7 +191,7 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { ASSERT_EQ(-1, FilePoolHelper::PersistEnCodeMetaInfo(lfs_, meta, poolMetaPath)); } - // 
open成功,write成功 + // open successful, write successful { EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) .WillOnce(Return(1)); @@ -204,11 +204,11 @@ TEST_F(CSChunkfilePoolMockTest, PersistEnCodeMetaInfoTest) { } } -// DecodeMetaInfoFromMetaFile接口的异常测试 +// Exception testing for DecodeMetaInfoFromMetaFile interface TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { FilePoolMeta meta; - // open失败 + // open failed { EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) .WillOnce(Return(-1)); @@ -219,7 +219,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // read失败 + // read failed { EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) .WillOnce(Return(1)); @@ -230,7 +230,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // read成功,解析Json格式失败 + // read successful, parsing Json format failed { char buf[metaFileSize] = {0}; EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) @@ -243,7 +243,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,chunksize为空 + // parsing Json format succeeded, chunksize is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -261,7 +261,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,metapagesize为空 + // parsing Json format succeeded, metapagesize is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -279,7 +279,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,kFilePoolPath为空 + // parsing Json format succeeded, kFilePoolPath is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -297,7 +297,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,kCRC为空 + // Successfully parsed Json format, kCRC is empty { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -315,7 +315,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 解析Json格式成功,crc不匹配 + // Successfully parsed Json format, crc mismatch { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -333,7 +333,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { ASSERT_EQ(-1, FilePoolHelper::DecodeMetaInfoFromMetaFile( lfs_, poolMetaPath, metaFileSize, &meta)); } - // 正常流程 + // Normal process { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(); @@ -351,7 +351,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { lfs_, poolMetaPath, metaFileSize, &meta)); } - // 正常流程 + // Normal process { char buf[metaFileSize] = {0}; Json::Value root = GenerateMetaJson(true); @@ -371,7 +371,7 @@ TEST_F(CSChunkfilePoolMockTest, DecodeMetaInfoFromMetaFileTest) { } TEST_F(CSChunkfilePoolMockTest, InitializeTest) { - // 初始化options + // Initialize options FilePoolOptions options; 
options.getFileFromPool = true; memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); @@ -381,12 +381,12 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { options.metaFileSize = metaFileSize; options.retryTimes = 3; - /****************getFileFromPool为true**************/ - // checkvalid时失败 + /****************getFileFromPool is true**************/ + // Failed while checking valid { - // DecodeMetaInfoFromMetaFile在上面已经单独测试过了 - // 这里选上面中的一组异常用例来检验即可 - // 解析json格式失败 + // DecodeMetaInfoFromMetaFile has been tested separately on it + // Here, select a set of uncommon examples from the above to test + // parsing JSON format failed FilePool pool(lfs_); char buf[metaFileSize] = {0}; EXPECT_CALL(*lfs_, Open(poolMetaPath, _)) @@ -398,7 +398,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .Times(1); ASSERT_EQ(false, pool.Initialize(options)); } - // getFileFromPool为true,checkvalid成功,当前目录不存在 + // getFileFromPool is true, checkvalid succeeded, current directory does not exist { FilePool pool(lfs_); FakeMetaFile(); @@ -406,7 +406,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .WillOnce(Return(false)); ASSERT_EQ(false, pool.Initialize(options)); } - // 当前目录存在,list目录失败 + // The current directory exists, list directory failed { FilePool pool(lfs_); FakeMetaFile(); @@ -416,7 +416,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .WillOnce(Return(-1)); ASSERT_EQ(false, pool.Initialize(options)); } - // list目录成功,文件名中包含非数字字符 + // list directory successful, file name contains non numeric characters { FilePool pool(lfs_); FakeMetaFile(); @@ -429,7 +429,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { Return(0))); ASSERT_EQ(false, pool.Initialize(options)); } - // list目录成功,目录中包含非普通文件类型的对象 + // list directory succeeded, it contains objects of non ordinary file types { FilePool pool(lfs_); FakeMetaFile(); @@ -444,7 +444,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .WillOnce(Return(false)); ASSERT_EQ(false, pool.Initialize(options)); } - // list目录成功,open文件时失败 + // list directory successful, open file failed { FilePool pool(lfs_); FakeMetaFile(); @@ -461,7 +461,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .WillOnce(Return(-1)); ASSERT_EQ(false, pool.Initialize(options)); } - // stat文件信息时失败 + // Failed to retrieve stat file information { FilePool pool(lfs_); FakeMetaFile(); @@ -482,7 +482,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .Times(1); ASSERT_EQ(false, pool.Initialize(options)); } - // stat文件信息成功,文件大小不匹配 + // stat file information successful, file size mismatch { FilePool pool(lfs_); FakeMetaFile(); @@ -507,7 +507,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .Times(1); ASSERT_EQ(false, pool.Initialize(options)); } - // 文件信息匹配 + // File information matching { FilePool pool(lfs_); FakeMetaFile(); @@ -534,9 +534,9 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { ASSERT_EQ(1, pool.Size()); } - /****************getFileFromPool为false**************/ + /****************getFileFromPool is false**************/ options.getFileFromPool = false; - // 当前目录不存在,创建目录失败 + // The current directory does not exist, creating directory failed { FilePool pool(lfs_); EXPECT_CALL(*lfs_, DirExists(_)) @@ -545,7 +545,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { .WillOnce(Return(-1)); ASSERT_EQ(false, pool.Initialize(options)); } - // 当前目录不存在,创建目录成功 + // The current directory does not exist, creating the directory succeeded { FilePool pool(lfs_); EXPECT_CALL(*lfs_, DirExists(_)) @@ -554,7 +554,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { 
.WillOnce(Return(0)); ASSERT_EQ(true, pool.Initialize(options)); } - // 当前目录存在 + // The current directory exists { FilePool pool(lfs_); EXPECT_CALL(*lfs_, DirExists(_)) @@ -564,7 +564,7 @@ TEST_F(CSChunkfilePoolMockTest, InitializeTest) { } TEST_F(CSChunkfilePoolMockTest, GetFileTest) { - // 初始化options + // Initialize options FilePoolOptions options; options.getFileFromPool = true; memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); @@ -577,14 +577,14 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { char metapage[PAGE_SIZE] = {0}; - /****************getFileFromPool为true**************/ - // 没有剩余chunk的情况 + /****************getFileFromPool is true**************/ + // The case where no chunks remain { FilePool pool(lfs_); FakePool(&pool, options, 0); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // 存在chunk,open时失败 + // Chunk exists, open failed { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -596,7 +596,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,write时失败 + // Chunk exists, write failed { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -611,7 +611,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,fsync时失败 + // Chunk exists, fsync failed { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -629,7 +629,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,close时失败 + // Chunk exists, close failed { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -648,7 +648,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,rename时返回EEXIST错误 + // Chunk exists, EEXIST error returned when renaming { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -665,7 +665,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-EEXIST, pool.GetFile(targetPath, metapage)); ASSERT_EQ(9, pool.Size()); } - // 存在chunk,rename时返回非EEXIST错误 + // Chunk exists, a non-EEXIST error returned when renaming { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -687,7 +687,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); ASSERT_EQ(10 - retryTimes, pool.Size()); } - // 存在chunk,rename成功 + // Chunk exists, rename successful { FilePool pool(lfs_); FakePool(&pool, options, 10); @@ -706,8 +706,8 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { } options.getFileFromPool = false; - /****************getFileFromPool为false**************/ - // open 时失败 + /****************getFileFromPool is false**************/ + // Failed during open { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -718,7 +718,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { .Times(0); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // fallocate 时失败 + // Failed during fallocate { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -732,7 +732,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { .Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // write 时失败 + // Failed during write { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -750,7 +750,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { .Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // fsync 时失败 + // Failed during fsync { 
FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -771,7 +771,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { .Times(retryTimes); ASSERT_EQ(-1, pool.GetFile(targetPath, metapage)); } - // close 时失败 + // Failed during close { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -796,7 +796,7 @@ TEST_F(CSChunkfilePoolMockTest, GetFileTest) { } TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { - // 初始化options + // Initialize options FilePoolOptions options; options.getFileFromPool = true; memcpy(options.filePoolDir, poolDir.c_str(), poolDir.size()); @@ -807,9 +807,9 @@ TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { int retryTimes = 3; options.retryTimes = retryTimes; - /****************getFileFromPool为false**************/ + /****************getFileFromPool is false**************/ options.getFileFromPool = false; - // delete文件时失败 + // Failed to delete file { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -817,7 +817,7 @@ TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { .WillOnce(Return(-1)); ASSERT_EQ(-1, pool.RecycleFile(filePath1)); } - // delete文件成功 + // Successfully deleted file { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -826,31 +826,31 @@ TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { ASSERT_EQ(0, pool.RecycleFile(filePath1)); } - /****************getFileFromPool为true**************/ + /****************getFileFromPool is true**************/ options.getFileFromPool = true; - // open失败 + // open failed { FilePool pool(lfs_); FakePool(&pool, options, 0); EXPECT_CALL(*lfs_, Open(targetPath, _)) .WillOnce(Return(-1)); - // 失败直接Delete + // On failure, delete directly EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(0)); - // Delete 成功就返回0 + // If Delete is successful, return 0 ASSERT_EQ(0, pool.RecycleFile(targetPath)); EXPECT_CALL(*lfs_, Open(targetPath, _)) .WillOnce(Return(-1)); - // 失败直接Delete + // On failure, delete directly EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(-1)); - // Delete 失败就返回错误码 + // If Delete fails, an error code will be returned ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } - // Fstat失败 + // Fstat failed { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -861,10 +861,10 @@ TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { .WillOnce(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(1); - // 失败直接Delete + // On failure, delete directly EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(0)); - // Delete 成功就返回0 + // If Delete is successful, return 0 ASSERT_EQ(0, pool.RecycleFile(targetPath)); EXPECT_CALL(*lfs_, Open(targetPath, _)) .WillOnce(Return(-1)); EXPECT_CALL(*lfs_, Fstat(1, NotNull())) .WillOnce(Return(-1)); EXPECT_CALL(*lfs_, Close(1)) .Times(1); - // 失败直接Delete + // On failure, delete directly EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(-1)); - // Delete 失败就返回错误码 + // If Delete fails, an error code will be returned ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } - // Fstat成功,大小不匹配 + // Fstat successful, size mismatch { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -894,10 +894,10 @@ TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { Return(0))); EXPECT_CALL(*lfs_, Close(1)) .Times(1); - // 失败直接Delete + // On failure, delete directly EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(0)); - // Delete 成功就返回0 + // If Delete is successful, return 0 ASSERT_EQ(0, pool.RecycleFile(targetPath)); EXPECT_CALL(*lfs_, Open(targetPath, _)) .WillOnce(Return(1)); EXPECT_CALL(*lfs_, Fstat(1, NotNull())) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), Return(0))); EXPECT_CALL(*lfs_, Close(1)) .Times(1); - // 失败直接Delete + // On failure, 
delete directly EXPECT_CALL(*lfs_, Delete(targetPath)) .WillOnce(Return(-1)); - // Delete 失败就返回错误码 + // If Delete fails, an error code will be returned ASSERT_EQ(-1, pool.RecycleFile(targetPath)); } - // Fstat信息匹配,rename失败 + // Fstat information matching, rename failed { FilePool pool(lfs_); FakePool(&pool, options, 0); @@ -934,7 +934,7 @@ TEST_F(CSChunkfilePoolMockTest, RecycleFileTest) { ASSERT_EQ(0, pool.Size()); } - // Fstat信息匹配,rename成功 + // Fstat information matching, rename successful { FilePool pool(lfs_); FakePool(&pool, options, 0); diff --git a/test/chunkserver/datastore/filepool_unittest.cpp b/test/chunkserver/datastore/filepool_unittest.cpp index 480f6da72a..7473dbf0f2 100644 --- a/test/chunkserver/datastore/filepool_unittest.cpp +++ b/test/chunkserver/datastore/filepool_unittest.cpp @@ -183,8 +183,8 @@ TEST_P(CSFilePool_test, InitializeTest) { // initialize ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cfop)); ASSERT_EQ(100, chunkFilePoolPtr_->Size()); - // 初始化阶段会扫描FilePool内的所有文件,在扫描结束之后需要关闭这些文件 - // 防止过多的文件描述符被占用 + // During the initialization phase, all files in the FilePool will be scanned, and after the scan is completed, these files need to be closed + // Prevent excessive file descriptors from being occupied ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "1")); ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "2")); ASSERT_FALSE(CheckFileOpenOrNot(filePoolPath + "50.clean")); @@ -583,8 +583,8 @@ TEST(CSFilePool, GetFileDirectlyTest) { fsptr = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); const std::string filePoolPath = FILEPOOL_DIR; // create chunkfile in chunkfile pool dir - // if chunkfile pool 的getFileFromPool开关关掉了,那么 - // FilePool的size是一直为0,不会从pool目录中找 + // if the getFileFromPool switch of the chunkfile pool is turned off, then + // The size of FilePool is always 0 and will not be found in the pool directory std::string filename = filePoolPath + "1000"; fsptr->Mkdir(filePoolPath); int fd = fsptr->Open(filename.c_str(), O_RDWR | O_CREAT); @@ -608,7 +608,7 @@ TEST(CSFilePool, GetFileDirectlyTest) { ASSERT_TRUE(chunkFilePoolPtr_->Initialize(cspopt)); ASSERT_EQ(0, chunkFilePoolPtr_->Size()); - // 测试获取chunk,chunkfile pool size不变一直为0 + // Test to obtain chunk, chunkfile pool size remains unchanged and remains at 0 char metapage[4096]; memset(metapage, '1', 4096); @@ -625,12 +625,12 @@ TEST(CSFilePool, GetFileDirectlyTest) { ASSERT_EQ(buf[i], '1'); } - // 测试回收chunk,文件被删除,FilePool Size不受影响 + // Test recycling chunk, file deleted, FilePool Size not affected chunkFilePoolPtr_->RecycleFile("./new1"); ASSERT_EQ(0, chunkFilePoolPtr_->Size()); ASSERT_FALSE(fsptr->FileExists("./new1")); - // 删除测试文件及目录 + // Delete test files and directories ASSERT_EQ(0, fsptr->Close(fd)); ASSERT_EQ(0, fsptr->Delete(filePoolPath + "1000")); ASSERT_EQ(0, fsptr->Delete(filePoolPath)); diff --git a/test/chunkserver/fake_datastore.h b/test/chunkserver/fake_datastore.h index 75b5c80330..dbafeaa966 100644 --- a/test/chunkserver/fake_datastore.h +++ b/test/chunkserver/fake_datastore.h @@ -213,7 +213,7 @@ class FakeCSDataStore : public CSDataStore { if (errorCode == CSErrorCode::Success) { return error_; } else { - // 注入错误自动恢复 + // Automatic recovery of injection errors error_ = CSErrorCode::Success; return errorCode; } diff --git a/test/chunkserver/heartbeat_helper_test.cpp b/test/chunkserver/heartbeat_helper_test.cpp index 7b9f9a9c6b..6e069b09e4 100644 --- a/test/chunkserver/heartbeat_helper_test.cpp +++ b/test/chunkserver/heartbeat_helper_test.cpp @@ -46,12 +46,12 @@ TEST(HeartbeatHelperTest, 
test_BuildNewPeers) { conf.set_epoch(2); std::vector newPeers; - // 1. 目标节点格式错误 + // 1. Destination node format error { - // 目标节点为空 + // The target node is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 目标节点不为空但格式有误 + // The target node is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.4"); conf.set_allocated_configchangeitem(replica); @@ -63,12 +63,12 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_configchangeitem(replica); } - // 2. 待删除节点格式错误 + // 2. The format of the node to be deleted is incorrect { - // 待删除节点为空 + // The node to be deleted is empty ASSERT_FALSE(HeartbeatHelper::BuildNewPeers(conf, &newPeers)); - // 待删除接节点不为空但格式有误 + // The node to be deleted is not empty but has incorrect format auto replica = new ::curve::common::Peer(); replica->set_address("192.0.0.1"); conf.set_allocated_oldpeer(replica); @@ -80,7 +80,7 @@ TEST(HeartbeatHelperTest, test_BuildNewPeers) { conf.set_allocated_oldpeer(replica); } - // 3. 生成新配置成功 + // 3. Successfully generated new configuration { for (int i = 0; i < 3; i++) { auto replica = conf.add_peers(); @@ -110,19 +110,19 @@ TEST(HeartbeatHelperTest, test_CopySetConfValid) { std::shared_ptr copyset; - // 1. chunkserver中不存在需要变更的copyset + // 1. There is no copyset that needs to be changed in chunkserver { ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 2. mds下发copysetConf的epoch是落后的 + // 2. The epoch of copysetConf issued by mds is outdated { copyset = std::make_shared(); EXPECT_CALL(*copyset, GetConfEpoch()).Times(2).WillOnce(Return(3)); ASSERT_FALSE(HeartbeatHelper::CopySetConfValid(conf, copyset)); } - // 3. mds下发copysetConf正常 + // 3. Mds sends copysetConf normally { EXPECT_CALL(*copyset, GetConfEpoch()).WillOnce(Return(2)); ASSERT_TRUE(HeartbeatHelper::CopySetConfValid(conf, copyset)); @@ -140,13 +140,13 @@ TEST(HeartbeatHelperTest, test_NeedPurge) { auto copyset = std::make_shared(); - // 1. mds下发空配置 + // 1. MDS issued empty configuration { conf.set_epoch(0); ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 2. 该副本不在复制组中 + // 2. The replica is not in the replication group { conf.set_epoch(2); for (int i = 2; i <= 4; i++) { @@ -157,7 +157,7 @@ TEST(HeartbeatHelperTest, test_NeedPurge) { ASSERT_TRUE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); } - // 3. 该副本在复制组中 + // 3. This replica is in the replication group { butil::str2endpoint("192.0.0.4:8200", &csEp); ASSERT_FALSE(HeartbeatHelper::NeedPurge(csEp, conf, copyset)); @@ -165,13 +165,13 @@ TEST(HeartbeatHelperTest, test_NeedPurge) { } TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) { - // 1. peerId的格式不对 + // 1. The format of peerId is incorrect { std::string peerId = "127.0.0:5555:0"; ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId)); } - // 2. 对端的chunkserver_service未起起来 + // 2. Opposite chunkserver_service not started { std::string peerId = "127.0.0.1:8888:0"; ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(peerId)); @@ -187,14 +187,14 @@ TEST(HeartbeatHelperTest, test_ChunkServerLoadCopySetFin) { ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr)); string listenAddr(butil::endpoint2str(server->listen_address()).c_str()); - // 3. 对端copyset未加载完成 + // 3. Peer copyset not loaded completed { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false)); ASSERT_FALSE(HeartbeatHelper::ChunkServerLoadCopySetFin(listenAddr)); } - // 4. 对端copyset加载完成 + // 4. 
End to end copyset loading completed { EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(true)); diff --git a/test/chunkserver/heartbeat_test.cpp b/test/chunkserver/heartbeat_test.cpp index fcfcae375a..f131ba5b38 100644 --- a/test/chunkserver/heartbeat_test.cpp +++ b/test/chunkserver/heartbeat_test.cpp @@ -63,7 +63,7 @@ class HeartbeatTest : public ::testing::Test { }; TEST_F(HeartbeatTest, TransferLeader) { - // 创建copyset + // Create copyset std::vector cslist{ "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; @@ -73,7 +73,7 @@ TEST_F(HeartbeatTest, TransferLeader) { hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo,expectleader是dst1 + // Construct the expected CopySetInfo for req, with the expectleader being dst1 ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -86,7 +86,7 @@ TEST_F(HeartbeatTest, TransferLeader) { peer->set_address(dest1); expect.set_allocated_leaderpeer(peer); - // 构造resp中的CopySetConf, transfer到dst1 + // Construct CopySetConf in resp, transfer to dst CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); @@ -99,25 +99,25 @@ TEST_F(HeartbeatTest, TransferLeader) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::TRANSFER_LEADER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); - // 构造req中期望的CopySetInfo,expectleader是dst2 + // Construct the expected CopySetInfo for req, with the expectleader being dst2 peer = new ::curve::common::Peer(); peer->set_address(dest2); expect.set_allocated_leaderpeer(peer); - // 构造resp中的CopySetConf, transfer到dst2 + // Construct CopySetConf in resp, transfer to dst2 peer = new ::curve::common::Peer(); peer->set_address(dest2); conf.set_allocated_configchangeitem(peer); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, RemovePeer) { - // 创建copyset + // Create copyset std::vector cslist{ "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; @@ -128,7 +128,7 @@ TEST_F(HeartbeatTest, RemovePeer) { hbtest_->WaitCopysetReady(poolId, copysetId, confStr); hbtest_->TransferLeaderSync(poolId, copysetId, confStr, leaderPeer); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -138,7 +138,7 @@ TEST_F(HeartbeatTest, RemovePeer) { } expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); @@ -151,53 +151,53 @@ TEST_F(HeartbeatTest, RemovePeer) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::REMOVE_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, CleanPeer_after_Configchange) { - // 创建copyset + // Create copyset std::vector cslist{"127.0.0.1:8200"}; std::string confStr = "127.0.0.1:8200:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct 
the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, CleanPeer_not_exist_in_MDS) { - // 在chunkserver上创建一个copyset + // Create a copyset on chunkserver std::vector cslist{"127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8202:0"; hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); conf.set_epoch(0); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, AddPeer) { - // 创建copyset + // Create copyset std::vector cslist{ "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; std::string confStr = "127.0.0.1:8200:0,127.0.0.1:8201:0"; @@ -206,7 +206,7 @@ TEST_F(HeartbeatTest, AddPeer) { hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, confStr); hbtest_->WaitCopysetReady(poolId, copysetId, confStr); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -216,7 +216,7 @@ TEST_F(HeartbeatTest, AddPeer) { } expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); @@ -229,12 +229,12 @@ TEST_F(HeartbeatTest, AddPeer) { conf.set_allocated_configchangeitem(peer); conf.set_type(curve::mds::heartbeat::ADD_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } TEST_F(HeartbeatTest, ChangePeer) { - // 创建copyset + // Create copyset std::vector cslist{ "127.0.0.1:8200", "127.0.0.1:8201", "127.0.0.1:8202"}; std::string oldConf = "127.0.0.1:8200:0,127.0.0.1:8202:0"; @@ -244,7 +244,7 @@ TEST_F(HeartbeatTest, ChangePeer) { hbtest_->CreateCopysetPeers(poolId, copysetId, cslist, oldConf); hbtest_->WaitCopysetReady(poolId, copysetId, oldConf); - // 构造req中期望的CopySetInfo + // Construct the CopySetInfo expected in req ::curve::mds::heartbeat::CopySetInfo expect; expect.set_logicalpoolid(poolId); expect.set_copysetid(copysetId); @@ -254,7 +254,7 @@ TEST_F(HeartbeatTest, ChangePeer) { replica->set_address("127.0.0.1:8201:0"); expect.set_epoch(2); - // 构造resp中的CopySetConf + // Construct CopySetConf in resp CopySetConf conf; conf.set_logicalpoolid(poolId); conf.set_copysetid(copysetId); @@ -271,7 +271,7 @@ TEST_F(HeartbeatTest, ChangePeer) { conf.set_allocated_oldpeer(peer); conf.set_type(curve::mds::heartbeat::CHANGE_PEER); - // 等待变更成功 + // Waiting for successful change ASSERT_TRUE(hbtest_->WailForConfigChangeOk(conf, expect, 30 * 1000)); } diff --git a/test/chunkserver/heartbeat_test_common.cpp b/test/chunkserver/heartbeat_test_common.cpp index 20d6b444f8..5a293f52b4 100644 --- a/test/chunkserver/heartbeat_test_common.cpp +++ b/test/chunkserver/heartbeat_test_common.cpp @@ -316,7 +316,7 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( GetHeartbeat(&cntl, &req, &resp, &done); brpc::ClosureGuard 
done_guard(done); - // 获取当前copyset的leader + // Get the leader of the current copyset std::string sender = req->ip() + ":" + std::to_string(req->port()) + ":0"; if (1 == req->copysetinfos_size()) { @@ -333,8 +333,8 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( } } - // 如果当前req是leader发送的,判断req中的内容是否符合要求 - // 如果符合要求,返回true; 如果不符合要求,设置resp中的内容 + // If the current req is sent by the leader, determine whether the content in the req meets the requirements + // If it meets the requirements, return true; If it does not meet the requirements, set the content in resp if (leader == sender) { if (!leaderPeerSet) { auto peer = new ::curve::common::Peer(); @@ -342,7 +342,7 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( expectedInfo.set_allocated_leaderpeer(peer); } - // 判断req是否符合要求, 符合要求返回true + // Determine whether the req meets the requirements, and return true if it meets the requirements if (req->copysetinfos_size() == 1) { if (SameCopySetInfo(req->copysetinfos(0), expectedInfo)) { return true; @@ -357,7 +357,7 @@ bool HeartbeatTestCommon::WailForConfigChangeOk( } } - // 不符合要求设置resp + // Not meeting the requirements to set resp if (req->copysetinfos_size() == 1) { auto build = resp->add_needupdatecopysets(); if (!build->has_epoch()) { diff --git a/test/chunkserver/heartbeat_test_common.h b/test/chunkserver/heartbeat_test_common.h index 433f7119eb..a17fd955de 100644 --- a/test/chunkserver/heartbeat_test_common.h +++ b/test/chunkserver/heartbeat_test_common.h @@ -75,56 +75,56 @@ class HeartbeatTestCommon { } /** - * CleanPeer 清空peer上指定copyset数据 + * CleanPeer: Clear the specified copyset data on the peer * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] peer chunkserver ip + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] peer chunkserver IP */ void CleanPeer( LogicPoolID poolId, CopysetID copysetId, const std::string& peer); /** - * CreateCopysetPeers 在指定chunkserverlist上创建指定配置的copyset + * CreateCopysetPeers: Create a copyset of the specified configuration on the specified chunkserverlist * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] cslist 待创建copyset的chunkserver列表 - * @param[in] conf 使用该配置作为初始配置创建copyset + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] cslist The chunkserver list for the copyset to be created + * @param[in] conf Use this configuration as the initial configuration to create a copyset */ void CreateCopysetPeers(LogicPoolID poolId, CopysetID copysetId, const std::vector &cslist, const std::string& conf); /** - * WaitCopysetReady 等待指定copyset选出leader + * WaitCopysetReady: Wait for the specified copyset to select the leader * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members */ void WaitCopysetReady( LogicPoolID poolId, CopysetID copysetId, const std::string& conf); /** - * TransferLeaderSync 触发transferleader并等待完成 + * TransferLeaderSync: Trigger transferleader and waits for completion * - * @param[in] poolId 逻辑池id - * @param[in] copysetId copyset id - * @param[in] conf 指定copyset复制组成员 - * @param[in] newLeader 目标leader + * @param[in] poolId Logical pool ID + * @param[in] copysetId copyset ID + * @param[in] conf specifies the copyset replication group members + * @param[in] newLeader Target Leader */ void TransferLeaderSync(LogicPoolID poolId, CopysetID 
copysetId, const std::string& conf, const std::string& newLeader); /** - * WailForConfigChangeOk 指定时间内(timeLimitMs),chunkserver是否上报了 - * 符合预期的copyset信息 + * WailForConfigChangeOk: Determine whether the chunkserver has reported the expected copyset + * information within the specified time limit (timeLimitMs). * - * @param[in] conf mds需要下发给指定copyset的变更命令 - * @param[in] expectedInfo 变更之后期望复制组配置 - * @param[in] timeLimitMs 等待时间 + * @param[in] conf mds needs to issue a change command to the specified copyset + * @param[in] expectedInfo replication group configuration after change + * @param[in] timeLimitMs waiting time * - * @return false-指定时间内copyset配置未能达到预期, true-达到预期 + * @return false - Copyset configuration failed to meet expectations within the specified time, true - met expectations */ bool WailForConfigChangeOk( const ::curve::mds::heartbeat::CopySetConf &conf, @@ -132,24 +132,24 @@ class HeartbeatTestCommon { int timeLimitMs); /** - * SameCopySetInfo 比较两个copysetInfo是否一致 + * SameCopySetInfo: Compare two copysetInfo structures to check if they are identical. * - * @param[in] orig 待比较的copysetInfo - * @param[in] expect 期望copysetInfo + * @param[in] orig The copysetInfo to compare. + * @param[in] expect The expected copysetInfo for comparison. * - * @return true-一致 false-不一致 + * @return true if they are identical, false if they are not. */ bool SameCopySetInfo( const ::curve::mds::heartbeat::CopySetInfo &orig, const ::curve::mds::heartbeat::CopySetInfo &expect); /** - * ReleaseHeartbeat heartbeat中的会掉设置为nullptr + * ReleaseHeartbeat: Set the callback in the heartbeat to nullptr. */ void ReleaseHeartbeat(); /** - * SetHeartbeatInfo 把mds接受到的cntl等信息复制到成员变量 + * SetHeartbeatInfo: Copy the cntl and other information received by mds to the member variable */ void SetHeartbeatInfo( ::google::protobuf::RpcController* cntl, @@ -158,7 +158,7 @@ class HeartbeatTestCommon { ::google::protobuf::Closure* done); /** - * GetHeartbeat 把当前成员中的cntl等变量设置到rpc中 + * GetHeartbeat: Set the current member's cntl and other variables into the RPC. 
*/ void GetHeartbeat( ::google::protobuf::RpcController** cntl, @@ -167,7 +167,7 @@ class HeartbeatTestCommon { ::google::protobuf::Closure** done); /** - * HeartbeatCallback heartbeat回掉 + * HeartbeatCallback: heartbeat callback */ static void HeartbeatCallback( ::google::protobuf::RpcController* controller, diff --git a/test/chunkserver/heartbeat_test_main.cpp b/test/chunkserver/heartbeat_test_main.cpp index de06bcc255..fc828a9151 100644 --- a/test/chunkserver/heartbeat_test_main.cpp +++ b/test/chunkserver/heartbeat_test_main.cpp @@ -133,7 +133,7 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "Failed to create chunkserver process 0"; } else if (pids[i] == 0) { /* - * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 + * RunChunkServer will call LOG() internally, with a low probability of getting stuck due to incompatible fork() */ return RunChunkServer(i, sizeof(param[i]) / sizeof(char *), const_cast(param[i])); @@ -148,8 +148,8 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "Failed to create test proccess"; } else if (pid == 0) { /* - * RUN_ALL_TESTS内部可能会调用LOG(), - * 有较低概率因不兼容fork()而卡死 + * LOG() may be called internally in RUN_ALL_TESTS, + * There is a low probability of getting stuck due to incompatible fork() */ ret = RUN_ALL_TESTS(); return ret; @@ -171,7 +171,7 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "Failed to restart chunkserver process 1"; } else if (pid == 0) { /* - * RunChunkServer内部会调用LOG(), 有较低概率因不兼容fork()而卡死 + * RunChunkServer will call LOG() internally, with a low probability of getting stuck due to incompatible fork() */ ret = RunChunkServer(1, sizeof(param[1]) / sizeof(char *), const_cast(param[1])); diff --git a/test/chunkserver/inflight_throttle_test.cpp b/test/chunkserver/inflight_throttle_test.cpp index 8faa18d76e..9d91159154 100644 --- a/test/chunkserver/inflight_throttle_test.cpp +++ b/test/chunkserver/inflight_throttle_test.cpp @@ -31,7 +31,7 @@ namespace chunkserver { using curve::common::Thread; TEST(InflightThrottleTest, basic) { - // 基本测试 + // Basic testing { uint64_t maxInflight = 1; InflightThrottle inflightThrottle(maxInflight); @@ -45,7 +45,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发加 + // Concurrent addition { uint64_t maxInflight = 10000; InflightThrottle inflightThrottle(maxInflight); @@ -78,7 +78,7 @@ TEST(InflightThrottleTest, basic) { ASSERT_FALSE(inflightThrottle.IsOverLoad()); } - // 并发减 + // Concurrent reduction { uint64_t maxInflight = 16; InflightThrottle inflightThrottle(maxInflight); diff --git a/test/chunkserver/metrics_test.cpp b/test/chunkserver/metrics_test.cpp index 282802336f..3722281e95 100644 --- a/test/chunkserver/metrics_test.cpp +++ b/test/chunkserver/metrics_test.cpp @@ -148,7 +148,7 @@ class CSMetricTest : public ::testing::Test { void CreateConfigFile() { confFile_ = "csmetric.conf"; - // 创建配置文件 + // Create Configuration File std::string confItem; std::ofstream cFile(confFile_); ASSERT_TRUE(cFile.is_open()); @@ -210,18 +210,18 @@ TEST_F(CSMetricTest, CopysetMetricTest) { int rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); - // 如果copyset的metric已经存在,返回-1 + // If the metric for the copyset already exists, return -1 rc = metric_->CreateCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, -1); - // 获取不存在的copyset metric,返回nullptr + // Get non-existent copyset metric and return nullptr CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, 2); ASSERT_EQ(copysetMetric, nullptr); copysetMetric = metric_->GetCopysetMetric(logicId, 
copysetId); ASSERT_NE(copysetMetric, nullptr); - // 删除copyset metric后,再去获取返回nullptr + // After deleting the copyset metric, go to retrieve and return nullptr rc = metric_->RemoveCopysetMetric(logicId, copysetId); ASSERT_EQ(rc, 0); copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); @@ -257,7 +257,7 @@ TEST_F(CSMetricTest, OnRequestTest) { const IOMetricPtr cpDownloadMetric = copysetMetric->GetIOMetric(CSIOMetricType::DOWNLOAD); - // 统计写入成功的情况 + // Count the success of writing metric_->OnRequest(logicId, copysetId, CSIOMetricType::WRITE_CHUNK); ASSERT_EQ(1, serverWriteMetric->reqNum_.get_value()); ASSERT_EQ(0, serverWriteMetric->ioNum_.get_value()); @@ -268,7 +268,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value()); - // 统计读取成功的情况 + // Statistics on successful reads metric_->OnRequest(logicId, copysetId, CSIOMetricType::READ_CHUNK); ASSERT_EQ(1, serverReadMetric->reqNum_.get_value()); ASSERT_EQ(0, serverReadMetric->ioNum_.get_value()); @@ -279,7 +279,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpReadMetric->errorNum_.get_value()); - // 统计恢复成功的情况 + // Statistics on successful recovery metric_->OnRequest(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK); ASSERT_EQ(1, serverRecoverMetric->reqNum_.get_value()); ASSERT_EQ(0, serverRecoverMetric->ioNum_.get_value()); @@ -290,7 +290,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value()); - // 统计paste成功的情况 + // Count the success of pass metric_->OnRequest(logicId, copysetId, CSIOMetricType::PASTE_CHUNK); ASSERT_EQ(1, serverPasteMetric->reqNum_.get_value()); ASSERT_EQ(0, serverPasteMetric->ioNum_.get_value()); @@ -301,7 +301,7 @@ TEST_F(CSMetricTest, OnRequestTest) { ASSERT_EQ(0, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value()); - // 统计下载成功的情况 + // Statistics on successful downloads metric_->OnRequest(logicId, copysetId, CSIOMetricType::DOWNLOAD); ASSERT_EQ(1, serverDownloadMetric->reqNum_.get_value()); ASSERT_EQ(0, serverDownloadMetric->ioNum_.get_value()); @@ -345,7 +345,7 @@ TEST_F(CSMetricTest, OnResponseTest) { size_t size = PAGE_SIZE; int64_t latUs = 100; bool hasError = false; - // 统计写入成功的情况 + // Count the success of writing metric_->OnResponse( logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value()); @@ -357,7 +357,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpWriteMetric->errorNum_.get_value()); - // 统计读取成功的情况 + // Statistics on successful reads metric_->OnResponse( logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverReadMetric->reqNum_.get_value()); @@ -369,7 +369,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpReadMetric->errorNum_.get_value()); - // 统计恢复成功的情况 + // Statistics on successful recovery metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value()); @@ -381,7 +381,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpRecoverMetric->errorNum_.get_value()); - // 统计paste成功的情况 + // Count the success of pass metric_->OnResponse( logicId, 
copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value()); @@ -393,7 +393,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(0, cpPasteMetric->errorNum_.get_value()); - // 统计下载成功的情况 + // Statistics on successful downloads metric_->OnResponse( logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError); ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value()); @@ -406,7 +406,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(0, cpDownloadMetric->errorNum_.get_value()); hasError = true; - // 统计写入失败的情况,错误数增加,其他不变 + // Count the number of write failures, increase the number of errors, and keep everything else unchanged metric_->OnResponse( logicId, copysetId, CSIOMetricType::WRITE_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverWriteMetric->reqNum_.get_value()); @@ -418,7 +418,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpWriteMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpWriteMetric->errorNum_.get_value()); - // 统计读取失败的情况,错误数增加,其他不变 + // Count the number of read failures, increase the number of errors, and keep everything else unchanged metric_->OnResponse( logicId, copysetId, CSIOMetricType::READ_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverReadMetric->reqNum_.get_value()); @@ -430,7 +430,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpReadMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpReadMetric->errorNum_.get_value()); - // 统计恢复失败的情况 + // Statistics on recovery failures metric_->OnResponse(logicId, copysetId, CSIOMetricType::RECOVER_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverRecoverMetric->reqNum_.get_value()); @@ -442,7 +442,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpRecoverMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpRecoverMetric->errorNum_.get_value()); - // 统计paste失败的情况 + // Count the situation of pass failures metric_->OnResponse( logicId, copysetId, CSIOMetricType::PASTE_CHUNK, size, latUs, hasError); ASSERT_EQ(0, serverPasteMetric->reqNum_.get_value()); @@ -454,7 +454,7 @@ TEST_F(CSMetricTest, OnResponseTest) { ASSERT_EQ(PAGE_SIZE, cpPasteMetric->ioBytes_.get_value()); ASSERT_EQ(1, cpPasteMetric->errorNum_.get_value()); - // 统计下载失败的情况 + // Statistics on download failures metric_->OnResponse( logicId, copysetId, CSIOMetricType::DOWNLOAD, size, latUs, hasError); ASSERT_EQ(0, serverDownloadMetric->reqNum_.get_value()); @@ -468,18 +468,18 @@ TEST_F(CSMetricTest, OnResponseTest) { } TEST_F(CSMetricTest, CountTest) { - // 初始状态下,没有copyset,FilePool中有chunkNum个chunk + // In the initial state, there is no copyset and there are chunkNum chunks in FilePool ASSERT_EQ(0, metric_->GetCopysetCount()); ASSERT_EQ(10, metric_->GetChunkLeftCount()); // Shared with chunk file pool ASSERT_EQ(0, metric_->GetWalSegmentLeftCount()); - // 创建copyset + // Create copyset Configuration conf; CopysetID copysetId = 1; ASSERT_TRUE(copysetMgr_->CreateCopysetNode(logicId, copysetId, conf)); ASSERT_EQ(1, metric_->GetCopysetCount()); - // 此时copyset下面没有chunk和快照 + // At this point, there are no chunks or snapshots under the copyset CopysetMetricPtr copysetMetric = metric_->GetCopysetMetric(logicId, copysetId); // NOLINT ASSERT_EQ(0, copysetMetric->GetChunkCount()); ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); @@ -534,7 +534,7 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(1, copysetMetric2->GetWalSegmentCount()); ASSERT_EQ(2, metric_->GetTotalWalSegmentCount()); - // 写入数据生成chunk + // Write data to generate 
chunk std::shared_ptr datastore = copysetMgr_->GetCopysetNode(logicId, copysetId)->GetDataStore(); ChunkID id = 1; @@ -553,7 +553,7 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, metric_->GetTotalSnapshotCount()); ASSERT_EQ(0, metric_->GetTotalCloneChunkCount()); - // 增加版本号,生成快照 + // Add version number and generate snapshot seq = 2; ASSERT_EQ(CSErrorCode::Success, datastore->WriteChunk(id, seq, dataBuf, offset, length, nullptr)); @@ -561,14 +561,14 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(1, copysetMetric->GetSnapshotCount()); ASSERT_EQ(0, copysetMetric->GetCloneChunkCount()); - // 删除快照 + // Delete snapshot ASSERT_EQ(CSErrorCode::Success, datastore->DeleteSnapshotChunkOrCorrectSn(id, seq)); ASSERT_EQ(1, copysetMetric->GetChunkCount()); ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(0, copysetMetric->GetCloneChunkCount()); - // 创建 clone chunk + // Create clone chunk ChunkID id2 = 2; ChunkID id3 = 3; std::string location = "test@cs"; @@ -580,7 +580,7 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(2, copysetMetric->GetCloneChunkCount()); - // clone chunk被覆盖写一遍,clone chun转成普通chunk + // The clone chunk is overwritten and written once, converting it to a regular chunk char* buf2 = new char[CHUNK_SIZE]; butil::IOBuf dataBuf2; dataBuf2.append(buf2, CHUNK_SIZE); @@ -591,14 +591,14 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(1, copysetMetric->GetCloneChunkCount()); - // 删除上面的chunk + // Delete the chunk above ASSERT_EQ(CSErrorCode::Success, datastore->DeleteChunk(id2, 1)); ASSERT_EQ(2, copysetMetric->GetChunkCount()); ASSERT_EQ(0, copysetMetric->GetSnapshotCount()); ASSERT_EQ(1, copysetMetric->GetCloneChunkCount()); - // 模拟copyset重新加载datastore,重新初始化后,chunk数量不变 + // Simulate copyset to reload the datastore, and after reinitialization, the number of chunks remains unchanged // for bug fix: CLDCFS-1473 datastore->Initialize(); ASSERT_EQ(2, copysetMetric->GetChunkCount()); @@ -608,7 +608,7 @@ TEST_F(CSMetricTest, CountTest) { ASSERT_EQ(0, metric_->GetTotalSnapshotCount()); ASSERT_EQ(1, metric_->GetTotalCloneChunkCount()); - // 模拟copyset放入回收站测试 + // Simulate copyset placement in the recycle bin for testing ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId)); ASSERT_TRUE(copysetMgr_->PurgeCopysetNodeData(logicId, copysetId2)); ASSERT_EQ(nullptr, metric_->GetCopysetMetric(logicId, copysetId)); @@ -619,7 +619,7 @@ TEST_F(CSMetricTest, CountTest) { // copysetId2: 1(wal) ASSERT_EQ(4, metric_->GetChunkTrashedCount()); - // 测试leader count计数 + // Test leader count count ASSERT_EQ(0, metric_->GetLeaderCount()); metric_->IncreaseLeaderCount(); ASSERT_EQ(1, metric_->GetLeaderCount()); @@ -639,11 +639,11 @@ TEST_F(CSMetricTest, ConfigTest) { "{\"conf_name\":\"chunksize\",\"conf_value\":\"1234\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(), "{\"conf_name\":\"timeout\",\"conf_value\":\"100\"}"); - // 修改新增配置信息 + // Modify new configuration information conf.SetStringValue("chunksize", "4321"); conf.SetStringValue("port", "9999"); metric_->ExposeConfigMetric(&conf); - // // 验证修改后信息 + // Verify modified information ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "chunksize").c_str(), "{\"conf_name\":\"chunksize\",\"conf_value\":\"4321\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed(prefix + "timeout").c_str(), @@ -657,7 +657,7 @@ TEST_F(CSMetricTest, OnOffTest) { ChunkServerMetricOptions metricOptions; metricOptions.port = PORT; metricOptions.ip 
= IP; - // 关闭metric开关后进行初始化 + // Initialize after turning off the metric switch { metricOptions.collectMetric = false; ASSERT_EQ(0, metric_->Init(metricOptions)); @@ -669,7 +669,7 @@ TEST_F(CSMetricTest, OnOffTest) { ASSERT_EQ(ret, true); metric_->ExposeConfigMetric(&conf); } - // 初始化后获取所有指标项都为空 + // Obtain all indicator items as empty after initialization { ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::READ_CHUNK), nullptr); ASSERT_EQ(metric_->GetIOMetric(CSIOMetricType::WRITE_CHUNK), nullptr); @@ -685,7 +685,7 @@ TEST_F(CSMetricTest, OnOffTest) { ASSERT_EQ(metric_->GetTotalCloneChunkCount(), 0); ASSERT_EQ(metric_->GetTotalWalSegmentCount(), 0); } - // 创建copyset的metric返回成功,但实际并未创建 + // Creating the metric for the copyset returned success, but it was not actually created { CopysetID copysetId = 1; ASSERT_EQ(0, metric_->CreateCopysetMetric(logicId, copysetId)); @@ -696,7 +696,7 @@ TEST_F(CSMetricTest, OnOffTest) { PAGE_SIZE, 100, false); ASSERT_EQ(0, metric_->RemoveCopysetMetric(logicId, copysetId)); } - // 增加leader count,但是实际未计数 + // Increase the leader count, but it is not actually counted { metric_->IncreaseLeaderCount(); ASSERT_EQ(metric_->GetLeaderCount(), 0); diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp index b33d196d95..74c32eccc8 100644 --- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp +++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_mock_unittest.cpp @@ -153,7 +153,7 @@ class RaftSnapshotFilesystemAdaptorMockTest : public testing::Test { }; TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { - // 1. open flag不带CREAT, open失败 + // 1. open flag without CREAT, open failed CreateChunkFile("./10"); std::string path = "./10"; butil::File::Error e; @@ -167,8 +167,8 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { ASSERT_EQ(FilePoolPtr_->Size(), 3); ASSERT_EQ(nullptr, fa); - // 2. open flag带CREAT, 从FilePool取文件,但是FilePool打开文件失败 - // 所以还是走原有逻辑,本地创建文件成功 + // 2. open flag with CREAT to retrieve files from FilePool, but FilePool failed to open the file + // So we still follow the original logic and successfully create the file locally EXPECT_CALL(*lfs, Open(_, _)).Times(3).WillOnce(Return(-1)) .WillOnce(Return(-1)) .WillOnce(Return(-1)); @@ -182,7 +182,7 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { ASSERT_FALSE(fsptr->FileExists("./10")); ASSERT_EQ(nullptr, fa); - // 3. 待创建文件在Filter中,但是直接本地创建该文件,创建成功 + // 3. The file to be created is in Filter, but it was created locally and successfully EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillOnce(Return(0)); EXPECT_CALL(*lfs, FileExists(_)).Times(0); path = BRAFT_SNAPSHOT_META_FILE; @@ -191,14 +191,14 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, open_file_mock_test) { } TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) { - // 1. 删除文件,文件存在且在过滤名单里,但delete失败,返回false + // 1. Delete file. The file exists and is on the filter list, but delete failed with false return EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true)); EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1)); bool ret = fsadaptor->delete_file(BRAFT_SNAPSHOT_META_FILE, true); ASSERT_FALSE(ret); - // 2. 删除文件,文件存在且不在过滤名单里,但recycle chunk失败,返回false + // 2. Delete file. 
The file exists and is not on the filter list, but the recycle chunk failed with false return EXPECT_CALL(*lfs, Delete(_)).Times(1).WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs, DirExists(_)).Times(1).WillRepeatedly(Return(false)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true)); @@ -206,7 +206,7 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) { ret = fsadaptor->delete_file("temp", true); ASSERT_FALSE(ret); - // 3. 删除目录,文件存在且不在过滤名单里,但recycle chunk失败,返回false + // 3. Delete directory. The file exists and is not on the filter list, but the recycle chunk failed with false return std::vector dircontent; dircontent.push_back("/2"); dircontent.push_back("/1"); @@ -222,13 +222,13 @@ TEST_F(RaftSnapshotFilesystemAdaptorMockTest, delete_file_mock_test) { } TEST_F(RaftSnapshotFilesystemAdaptorMockTest, rename_mock_test) { - // 1. 重命名文件,文件存在且在过滤名单里,但Rename失败,返回false + // 1. Renaming file, file exists and is on the filter list, but Rename failed with false return EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(-1)); EXPECT_CALL(*lfs, FileExists(_)).Times(0); bool ret = fsadaptor->rename("1", BRAFT_SNAPSHOT_META_FILE); ASSERT_FALSE(ret); - // 2. 重命名文件,文件存在且不在过滤名单里,但Rename失败,返回false + // 2. Renaming file. The file exists and is not on the filter list, but Rename failed with false return EXPECT_CALL(*lfs, Rename(_, _, _)).Times(1).WillRepeatedly(Return(0)); EXPECT_CALL(*lfs, FileExists(_)).Times(1).WillRepeatedly(Return(true)); EXPECT_CALL(*lfs, Open(_, _)).Times(1).WillRepeatedly(Return(0)); diff --git a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp index 926ccc76c5..235ef00924 100644 --- a/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp +++ b/test/chunkserver/raftsnapshot/curve_filesystem_adaptor_unittest.cpp @@ -130,7 +130,7 @@ class CurveFilesystemAdaptorTest : public testing::Test { }; TEST_F(CurveFilesystemAdaptorTest, open_file_test) { - // 1. open flag不带CREAT + // 1. Open flag without CREAT std::string path = "./raftsnap/10"; butil::File::Error e; ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); @@ -142,14 +142,14 @@ TEST_F(CurveFilesystemAdaptorTest, open_file_test) { ASSERT_FALSE(fsptr->FileExists("./raftsnap/10")); ASSERT_EQ(nullptr, fa); - // 2. open flag待CREAT, 从FilePool取文件 + // 2. Open flag for CREAT, retrieve files from FilePool ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); fa = fsadaptor->open(path, O_RDONLY | O_CLOEXEC | O_CREAT, nullptr, &e); ASSERT_EQ(chunkFilePoolPtr_->Size(), 2); ASSERT_TRUE(fsptr->FileExists("./raftsnap/10")); ASSERT_NE(nullptr, fa); - // 3. open flag待CREAT,FilePool为空时,从FilePool取文件 + // 3. Open flag, wait for CREAT, and when FilePool is empty, retrieve the file from FilePool ClearFilePool(); fa = fsadaptor->open("./raftsnap/11", O_RDONLY | O_CLOEXEC | O_CREAT, @@ -159,7 +159,7 @@ TEST_F(CurveFilesystemAdaptorTest, open_file_test) { } TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { - // 1. 创建一个多层目录,且目录中含有chunk文件 + // 1. 
Create a multi-level directory with chunk files in it ASSERT_EQ(0, fsptr->Mkdir("./test_temp")); ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1")); ASSERT_EQ(0, fsptr->Mkdir("./test_temp/test_temp1/test_temp2")); @@ -169,11 +169,11 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { CreateChunkFile("./test_temp/test_temp1/2"); CreateChunkFile("./test_temp/test_temp1/test_temp2/1"); CreateChunkFile("./test_temp/test_temp1/test_temp2/2"); - // 非递归删除非空文件夹,返回false + // Non recursive deletion of non empty folders, returning false ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); ASSERT_FALSE(fsadaptor->delete_file("./test_temp", false)); ASSERT_EQ(chunkFilePoolPtr_->Size(), 3); - // 递归删除文件夹,chunk被回收到FilePool + // Recursively delete folder, chunk is recycled to FilePool ASSERT_TRUE(fsadaptor->delete_file("./test_temp", true)); ASSERT_EQ(chunkFilePoolPtr_->Size(), 9); ASSERT_FALSE(fsptr->DirExists("./test_temp")); @@ -186,7 +186,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/1")); ASSERT_FALSE(fsptr->FileExists("./test_temp/test_temp1/test_temp2/2")); - // 2. 创建一个单层空目录 + // 2. Create a single level empty directory ASSERT_EQ(0, fsptr->Mkdir("./test_temp3")); ASSERT_TRUE(fsadaptor->delete_file("./test_temp3", false)); ASSERT_EQ(0, fsptr->Mkdir("./test_temp4")); @@ -195,7 +195,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_FALSE(fsptr->DirExists("./test_temp3")); ASSERT_FALSE(fsptr->DirExists("./test_temp4")); - // 3. 删除一个常规chunk文件, 会被回收到FilePool + // 3. Deleting a regular chunk file will be recycled to FilePool ASSERT_EQ(0, fsptr->Mkdir("./test_temp5")); CreateChunkFile("./test_temp5/3"); ASSERT_TRUE(fsadaptor->delete_file("./test_temp5/3", false)); @@ -212,7 +212,7 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { ASSERT_EQ(0, fsptr->Delete("./test_temp6")); - // 4. 删除一个非chunk大小的文件,会直接删除该文件 + // 4. Deleting a file of a non chunk size will directly delete the file ASSERT_EQ(0, fsptr->Mkdir("./test_temp7")); int fd = fsptr->Open("./test_temp7/5", O_RDWR | O_CREAT); char data[4096]; @@ -226,12 +226,12 @@ TEST_F(CurveFilesystemAdaptorTest, delete_file_test) { } TEST_F(CurveFilesystemAdaptorTest, rename_test) { - // 1. 创建一个多层目录,且目录中含有chunk文件 + // 1. 
Create a multi-level directory with chunk files in it ASSERT_EQ(0, fsptr->Mkdir("./test_temp")); std::string filename = "./test_temp/"; filename.append(BRAFT_SNAPSHOT_META_FILE); - // 目标文件size是chunksize,但是目标文件在过滤名单里,所以直接过滤 + // The target file size is chunksize, but it is on the filtering list, so it is directly filtered CreateChunkFile(filename); int poolSize = chunkFilePoolPtr_->Size(); std::string temppath = "./temp"; @@ -243,7 +243,7 @@ TEST_F(CurveFilesystemAdaptorTest, rename_test) { ASSERT_EQ(poolSize - 1, chunkFilePoolPtr_->Size()); ASSERT_EQ(0, fsptr->Delete(filename)); - // 目标文件size是chunksize,但是目标文件不在过滤名单里,所以先回收再rename + // The target file size is chunksize, but it is not on the filter list, so recycle it first and rename it again filename = "./test_temp/"; filename.append("test"); CreateChunkFile(filename); diff --git a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp index 9e3ca39605..1e82a5342f 100644 --- a/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp +++ b/test/chunkserver/raftsnapshot/curve_snapshot_attachment_test.cpp @@ -59,7 +59,7 @@ class CurveSnapshotAttachmentMockTest : public testing::Test { }; TEST_F(CurveSnapshotAttachmentMockTest, ListTest) { - // 返回成功 + // Return successful vector fileNames; fileNames.emplace_back("chunk_1"); fileNames.emplace_back("chunk_1_snap_1"); @@ -76,13 +76,13 @@ TEST_F(CurveSnapshotAttachmentMockTest, ListTest) { EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(), snapPath2.c_str())); - // 路径结尾添加反斜杠 + // Add a backslash at the end of the path EXPECT_CALL(*fs_, List(kDataDir, _)) .WillOnce(DoAll(SetArgPointee<1>(fileNames), Return(0))); attachment_->list_attach_files(&snapFiles, std::string(kRaftSnapDir) + "/"); EXPECT_THAT(snapFiles, UnorderedElementsAre(snapPath1.c_str(), snapPath2.c_str())); - // 返回失败 + // Return failed EXPECT_CALL(*fs_, List(kDataDir, _)) .WillRepeatedly(Return(-1)); ASSERT_DEATH(attachment_->list_attach_files(&snapFiles, kRaftSnapDir), ""); diff --git a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp index 94bcc4d5a8..958bda1183 100644 --- a/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp +++ b/test/chunkserver/raftsnapshot/raftsnapshot_chunkfilepool_integration.cpp @@ -74,18 +74,18 @@ class RaftSnapFilePoolTest : public testing::Test { }; /** - * TODO(wudemiao) 后期将发 I/O 和验证再抽象一下 + * TODO(wudemiao) will further abstract I/O and verification in the later stage */ /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ static void WriteThenReadVerify(PeerId leaderId, LogicPoolID logicPoolId, @@ -158,14 +158,14 @@ static void WriteThenReadVerify(PeerId leaderId, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID 
+ * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ static void ReadVerify(PeerId leaderId, LogicPoolID logicPoolId, @@ -204,18 +204,18 @@ static void ReadVerify(PeerId leaderId, } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据 - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot, - * 因为 log 已经被删除了, install snapshot的数据从FilePool中取文件 - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + * Verify the shutdown and restart of non-leader nodes in a cluster of 3 nodes, and control them to recover from installing snapshots. + * 1. Create a replication group with 3 replicas. + * 2. Wait for the leader to emerge, write data, and then read to verify. + * 3. Shutdown a non-leader node. + * 4. Sleep for a duration longer than a snapshot interval, then write and read data. + * 5. Sleep for a duration longer than a snapshot interval again, then write and read data. + * Steps 4 and 5 are to ensure that at least two snapshots are taken. Therefore, + * when the node restarts, it must recover via an install snapshot because the log has already been deleted. The data for the install snapshot is retrieved from the FilePool. + * 6. Wait for the leader to emerge, then read the previously written data for verification. + * 7. Transfer leadership to the shut down peer. + * 8. Verify the data written before the transfer of leadership. + * 9. Write data again, then read it to verify. 
*/ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -238,12 +238,12 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.StartPeer(peer2, false, true, true)); ASSERT_EQ(0, cluster.StartPeer(peer3, false, true, true)); - // 等待FilePool创建成功 + // Waiting for FilePool creation to succeed std::this_thread::sleep_for(std::chrono::seconds(60)); PeerId leaderId; ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 获取三个chunkserver的FilePool的pool容量 + // Obtain the pool capacity of FilePool for three chunkservers std::shared_ptr fs(LocalFsFactory::CreateFs( FileSystemType::EXT4, "")); std::vector Peer1ChunkPoolSize; @@ -270,14 +270,14 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); - // 目前只有chunk文件才会从FilePool中取 - // raft snapshot meta 和 conf epoch文件直接从文件系统创建 + // Currently, only chunk files are retrieved from FilePool + // raft snapshot meta and conf epoch files are created directly from the file system ASSERT_EQ(20, Peer1ChunkPoolSize.size()); ASSERT_EQ(20, Peer2ChunkPoolSize.size()); ASSERT_EQ(20, Peer3ChunkPoolSize.size()); LOG(INFO) << "write 1 start"; - // 发起 read/write, 写数据会触发chunkserver从FilePool取chunk + // Initiate read/write, writing data will trigger chunkserver to fetch chunks from FilePool WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -287,8 +287,8 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { loop); LOG(INFO) << "write 1 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // The operations between replicas within the raft are not all synchronized, and there may be outdated replica operations + // So take a nap first to prevent concurrent statistics of file information ::sleep(1*snapshotTimeoutS); Peer1ChunkPoolSize.clear(); @@ -298,12 +298,12 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); - // 写完数据后,ChunkFilePool容量少一个 + // After writing the data, ChunkFilePool has one less capacity ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(19, Peer2ChunkPoolSize.size()); ASSERT_EQ(19, Peer3ChunkPoolSize.size()); - // shutdown 某个非 leader 的 peer + // shutdown a non leader peer PeerId shutdownPeerid; if (0 == ::strcmp(leaderId.to_string().c_str(), peer1.to_string().c_str())) { @@ -317,13 +317,13 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { leaderId.to_string().c_str())); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeerid)); - // wait snapshot, 保证能够触发打快照 - // 本次打快照,raft会从FilePool取一个文件作为快照文件 - // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool - // 所以总体上本次写入只会导致datastore从FilePool取文件 - // 但是快照取了一个又放回去了一个 + // wait snapshot, to ensure it triggers the snapshot creation. + // In this snapshot creation, Raft will retrieve a file from the FilePool as the snapshot file, + // and it will delete the previous snapshot file. The deleted file will be reclaimed into the FilePool. + // So overall, this snapshot creation will only result in the datastore retrieving a file from the FilePool, + // but a snapshot is taken and then returned. 
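The translated comments above describe the net effect on the chunk file pool that the later assertions rely on: each snapshot consumes one preallocated file from the pool and the previous snapshot file is recycled back, so one snapshot cycle leaves the pool size unchanged. A minimal standalone sketch of that bookkeeping, using a simplified ToyFilePool stand-in rather than Curve's actual FilePool:

#include <cassert>
#include <cstddef>
#include <deque>
#include <string>

class ToyFilePool {
 public:
    explicit ToyFilePool(int n) {
        for (int i = 0; i < n; ++i) {
            files_.push_back("chunk_" + std::to_string(i));
        }
    }
    // Take a preallocated file out of the pool (e.g. for a new snapshot file).
    std::string GetFile() {
        std::string f = files_.front();
        files_.pop_front();
        return f;
    }
    // Return a no-longer-needed file to the pool (e.g. the previous snapshot).
    void RecycleFile(const std::string& f) { files_.push_back(f); }
    std::size_t Size() const { return files_.size(); }

 private:
    std::deque<std::string> files_;
};

int main() {
    ToyFilePool pool(20);
    std::string newSnap = pool.GetFile();   // new snapshot consumes one file
    pool.RecycleFile("old_snapshot");       // previous snapshot file is reclaimed
    assert(pool.Size() == 20);              // net effect on the pool is zero
    return 0;
}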
::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again LOG(INFO) << "write 2 start"; WriteThenReadVerify(leaderId, logicPoolId, @@ -343,20 +343,20 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { fs->List(copysetdir2+"/chunkfilepool", &Peer2ChunkPoolSize); fs->List(copysetdir3+"/chunkfilepool", &Peer3ChunkPoolSize); - // 写完数据后,FilePool容量少一个 + // After writing the data, the FilePool capacity is reduced by one ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(19, Peer2ChunkPoolSize.size()); ASSERT_EQ(19, Peer3ChunkPoolSize.size()); - // wait snapshot, 保证能够触发打快照 - // 本次打快照,raft会从FilePool取一个文件作为快照文件 - // 然后会把上一次的快照文件删除,删除过的文件会被回收到FilePool - // 所以总体上本次写入只会导致datastore从FilePool取文件 - // 但是快照取了一个又放回去了一个 + // wait snapshot, to ensure it triggers snapshot creation. + // In this snapshot creation, Raft will retrieve a file from the FilePool as the snapshot file. + // Then, it will delete the previous snapshot file, and the deleted file will be reclaimed into the FilePool. + // So, overall, this snapshot creation will only result in the datastore retrieving a file from the FilePool, + // but it involves taking one snapshot and returning another to the FilePool. ::sleep(1.5*snapshotTimeoutS); - // 再次发起 read/write + // Initiate read/write again LOG(INFO) << "write 3 start"; - // 增加chunkid,使chunkserver端的chunk又被取走一个 + // Add a chunkid to remove another chunk from the chunkserver side WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -378,7 +378,7 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size(); LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size(); - // 写完数据后,FilePool容量少一个 + // After writing the data, the FilePool capacity is reduced by one if (shutdownPeerid == peer1) { ASSERT_EQ(19, Peer1ChunkPoolSize.size()); ASSERT_EQ(18, Peer2ChunkPoolSize.size()); @@ -388,15 +388,15 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { } ASSERT_EQ(18, Peer3ChunkPoolSize.size()); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeerid, false, true, false)); ASSERT_EQ(0, cluster.WaitLeader(&leaderId)); - // 读出来验证一遍 + // Read it out and verify it again ReadVerify(leaderId, logicPoolId, copysetId, chunkId + 1, length, ch + 2, loop); LOG(INFO) << "write 4 start"; - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderId, logicPoolId, copysetId, @@ -445,8 +445,8 @@ TEST_F(RaftSnapFilePoolTest, ShutdownOnePeerRestartFromInstallSnapshot) { LOG(INFO) << "chunk pool2 size = " << Peer2ChunkPoolSize.size(); LOG(INFO) << "chunk pool3 size = " << Peer3ChunkPoolSize.size(); - // 当前的raftsnapshot filesystem只存取chunk文件 - // meta文件遵守原有逻辑,直接通过文件系统创建,所以这里只有两个chunk被取出 + // The current raftsnapshot filesystem only accesses chunk files + // The meta file follows the original logic and is created directly through the file system, so only two chunks are extracted here ASSERT_EQ(18, Peer1ChunkPoolSize.size()); ASSERT_EQ(18, Peer2ChunkPoolSize.size()); ASSERT_EQ(18, Peer3ChunkPoolSize.size()); diff --git a/test/chunkserver/server.cpp b/test/chunkserver/server.cpp index d6f5d9aa97..db260d32b4 100644 --- a/test/chunkserver/server.cpp +++ b/test/chunkserver/server.cpp @@ -142,7 +142,7 @@ int main(int argc, char *argv[]) { std::shared_ptr fs( LocalFsFactory::CreateFs(FileSystemType::EXT4, "")); const uint32_t kMaxChunkSize = 16 * 1024 * 1024; - // 
TODO(yyk) 这部分实现不太优雅,后续进行重构 + // The implementation of TODO(yyk) is not very elegant, and will be refactored in the future std::string copysetUri = FLAGS_copyset_dir + "/copysets"; CopysetNodeOptions copysetNodeOptions; copysetNodeOptions.ip = FLAGS_ip; diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index 3ddf32f27e..01f24cabd5 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) { "curve_log_inprogress_10088")) .WillOnce(Return(-1)); - // 失败的情况下不应删除 + // Should not be deleted in case of failure EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555")) .Times(0); diff --git a/test/client/client_common_unittest.cpp b/test/client/client_common_unittest.cpp index d7601e19f1..f938b8d728 100644 --- a/test/client/client_common_unittest.cpp +++ b/test/client/client_common_unittest.cpp @@ -28,20 +28,20 @@ namespace curve { namespace client { TEST(ClientCommon, PeerAddrTest) { - // 默认构造函数创建的成员变量内容为空 + // The member variable content created by the default constructor is empty PeerAddr chunkaddr; ASSERT_TRUE(chunkaddr.IsEmpty()); EndPoint ep; str2endpoint("127.0.0.1:8000", &ep); - // 从已有的endpoint创建PeerAddr,变量内容非空 + // Create PeerAddr from an existing endpoint, with non empty variable content PeerAddr caddr(ep); ASSERT_FALSE(caddr.IsEmpty()); ASSERT_EQ(caddr.addr_.port, 8000); ASSERT_STREQ("127.0.0.1:8000:0", caddr.ToString().c_str()); - // reset置位后成员变量内容为空 + // After resetting, the member variable content is empty caddr.Reset(); ASSERT_TRUE(caddr.IsEmpty()); @@ -49,7 +49,7 @@ TEST(ClientCommon, PeerAddrTest) { PeerAddr caddr2; ASSERT_TRUE(caddr2.IsEmpty()); - // 从字符串中解析出地址信息,字符串不符合解析格式返回-1,"ip:port:index" + // Resolve address information from the string, if the string does not conform to the parsing format, return -1, "ip:port:index" std::string ipaddr1("127.0.0.1"); ASSERT_EQ(-1, caddr2.Parse(ipaddr1)); std::string ipaddr2("127.0.0.q:9000:0"); @@ -61,11 +61,11 @@ TEST(ClientCommon, PeerAddrTest) { std::string ipaddr5("127.0.0.1001:9000:0"); ASSERT_EQ(-1, caddr2.Parse(ipaddr5)); - // 从字符串解析地址成功后,成员变量即为非空 + // After successfully resolving the address from the string, the member variable becomes non empty ASSERT_EQ(0, caddr2.Parse(ipaddr)); ASSERT_FALSE(caddr2.IsEmpty()); - // 验证非空成员变量是否为预期值 + // Verify if the non empty member variable is the expected value EndPoint ep1; str2endpoint("127.0.0.1:9000", &ep1); ASSERT_EQ(caddr2.addr_, ep1); diff --git a/test/client/client_mdsclient_metacache_unittest.cpp b/test/client/client_mdsclient_metacache_unittest.cpp index cfae5506e1..4bb23a208b 100644 --- a/test/client/client_mdsclient_metacache_unittest.cpp +++ b/test/client/client_mdsclient_metacache_unittest.cpp @@ -170,7 +170,7 @@ TEST_F(MDSClientTest, Createfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, globalclient->Create(filename.c_str(), userinfo, len)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -223,7 +223,7 @@ TEST_F(MDSClientTest, MkDir) { globalclient->Mkdir(dirpath.c_str(), userinfo)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -272,7 +272,7 @@ TEST_F(MDSClientTest, Closefile) { ret = mdsclient_.CloseFile(filename.c_str(), userinfo, "sessid"); ASSERT_EQ(ret, LIBCURVE_ERROR::OK); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -411,7 +411,7 @@ TEST_F(MDSClientTest, 
Openfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, Write(fd, nullptr, 0, 0)); ASSERT_EQ(LIBCURVE_ERROR::OK, Read(fd, nullptr, 0, 0)); - // 测试关闭文件 + // Test closing file ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); @@ -426,7 +426,7 @@ TEST_F(MDSClientTest, Openfile) { ASSERT_EQ(LIBCURVE_ERROR::OK, AioWrite(fd, &aioctx)); ASSERT_EQ(LIBCURVE_ERROR::OK, AioRead(fd, &aioctx)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -515,7 +515,7 @@ TEST_F(MDSClientTest, Renamefile) { ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Rename(userinfo, filename1, filename2)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -611,7 +611,7 @@ TEST_F(MDSClientTest, Extendfile) { ASSERT_EQ(-1 * LIBCURVE_ERROR::NO_SHRINK_BIGGER_FILE, globalclient->Extend(filename1, userinfo, newsize)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -691,14 +691,14 @@ TEST_F(MDSClientTest, Deletefile) { ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Unlink(filename1, userinfo)); - // 设置delete force + //Set delete force fiu_init(0); fiu_enable("test/client/fake/fakeMDS/forceDeleteFile", 1, nullptr, 0); ASSERT_EQ(-1 * LIBCURVE_ERROR::NOT_SUPPORT, globalclient->Unlink(filename1, userinfo, true)); fiu_disable("test/client/fake/fakeMDS/forceDeleteFile"); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -783,7 +783,7 @@ TEST_F(MDSClientTest, Rmdir) { ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Rmdir(filename1, userinfo)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -831,7 +831,7 @@ TEST_F(MDSClientTest, StatFile) { ASSERT_EQ(fstat.ctime, 12345678); ASSERT_EQ(fstat.length, 4 * 1024 * 1024 * 1024ul); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -890,7 +890,7 @@ TEST_F(MDSClientTest, GetFileInfo) { ASSERT_EQ(finfo->segmentsize, 1 * 1024 * 1024 * 1024ul); ASSERT_EQ(finfo->blocksize, hasBlockSize ? blocksize : 4096); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -1222,7 +1222,7 @@ TEST_F(MDSClientTest, GetLeaderTest) { mc.UpdateCopysetInfo(1234, 1234, cslist); - // 测试复制组里第三个addr为leader + // The third addr in the test replication group is the leader curve::chunkserver::GetLeaderResponse2 response1; curve::common::Peer *peer1 = new curve::common::Peer(); peer1->set_address(peerinfo_3.internalAddr.ToString()); @@ -1245,9 +1245,9 @@ TEST_F(MDSClientTest, GetLeaderTest) { butil::str2endpoint("127.0.0.1", 29122, &expected); EXPECT_EQ(expected, leaderep); - // 测试拉取新leader失败,需要到mds重新fetch新的serverlist - // 当前新leader是3,尝试再刷新leader,这个时候会从1, 2获取leader - // 但是这时候leader找不到了,于是就会触发向mds重新拉取最新的server list + // The test failed to retrieve the new leader, and a new serverlist needs to be retrieved from the mds + // The current new leader is 3. 
Try refreshing the leader again, and at this time, the leader will be obtained from 1 and 2 + // But at this point, the leader cannot be found, so it will trigger a new pull of the latest server list from the mds brpc::Controller controller11; controller11.SetFailed(-1, "error"); FakeReturn fakeret111(&controller11, static_cast(&response1)); @@ -1279,14 +1279,14 @@ TEST_F(MDSClientTest, GetLeaderTest) { cliservice2.CleanInvokeTimes(); cliservice3.CleanInvokeTimes(); - // 向当前集群中拉取leader,然后会从mds一侧获取新server list + // Pull the leader from the current cluster, and then obtain a new server list from the mds side EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, true)); - // getleader请求会跳过当前leader + // The getleader request will skip the current leader EXPECT_EQ(0, cliservice3.GetInvokeTimes()); - // 因为从mds获取新的copyset信息了,所以其leader信息被重置了,需要重新获取新leader - // 获取新新的leader,这时候会从1,2,3,4这四个server拉取新leader,并成功获取新leader + // Because the new copyset information was obtained from the mds, its leader information has been reset and a new leader needs to be obtained + // Obtain a new leader, which will be pulled from servers 1, 2, 3, and 4 and successfully obtain the new leader std::string leader = "10.182.26.2:29123:0"; peer1 = new curve::common::Peer(); peer1->set_address(leader); @@ -1309,7 +1309,7 @@ TEST_F(MDSClientTest, GetLeaderTest) { cliservice3.CleanInvokeTimes(); cliservice4.CleanInvokeTimes(); - // refresh为false,所以只会从metacache中获取,不会发起rpc请求 + // Refresh is false, so it will only be obtained from the metacache and will not initiate rpc requests EXPECT_EQ(0, mc.GetLeader(1234, 1234, &ckid, &leaderep, false)); EXPECT_EQ(expected, leaderep); EXPECT_EQ(0, cliservice1.GetInvokeTimes()); @@ -1317,8 +1317,8 @@ TEST_F(MDSClientTest, GetLeaderTest) { EXPECT_EQ(0, cliservice3.GetInvokeTimes()); EXPECT_EQ(0, cliservice4.GetInvokeTimes()); - // 测试新增一个leader,该节点不在配置组内, 然后通过向mds - // 查询其chunkserverInfo之后, 将其成功插入metacache + // Add a new leader to the test, which is not in the configuration group, and then add it to the mds + // After querying its chunkserverInfo, successfully insert it into the metacache curve::common::Peer *peer7 = new curve::common::Peer(); leader = "10.182.26.2:29124:0"; peer7->set_address(leader); @@ -1407,7 +1407,7 @@ TEST_F(MDSClientTest, CreateCloneFile) { FInfo finfo; curve::mds::FileInfo *info = new curve::mds::FileInfo; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -1424,7 +1424,7 @@ TEST_F(MDSClientTest, CreateCloneFile) { mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 1024 * 1024, 0, 0, "default", &finfo)); - // 认证失败 + // Authentication failed curve::mds::CreateCloneFileResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -1437,7 +1437,7 @@ TEST_F(MDSClientTest, CreateCloneFile) { mdsclient_.CreateCloneFile("source", "destination", userinfo, 10 * 1024 * 1024, 0, 4 * 1024 * 1024, 0, 0, "default", &finfo)); - // 请求成功 + // Request successful info->set_id(5); curve::mds::CreateCloneFileResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); @@ -1463,7 +1463,7 @@ TEST_F(MDSClientTest, CreateCloneFile) { TEST_F(MDSClientTest, CompleteCloneMeta) { std::string filename = "/1_userinfo_"; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -1479,7 +1479,7 @@ TEST_F(MDSClientTest, CompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::FAILED, 
mdsclient_.CompleteCloneMeta("destination", userinfo)); - // 认证失败 + // Authentication failed curve::mds::SetCloneFileStatusResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -1490,7 +1490,7 @@ TEST_F(MDSClientTest, CompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL, mdsclient_.CompleteCloneMeta("destination", userinfo)); - // 请求成功 + // Request successful curve::mds::SetCloneFileStatusResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); @@ -1506,7 +1506,7 @@ TEST_F(MDSClientTest, CompleteCloneMeta) { TEST_F(MDSClientTest, CompleteCloneFile) { std::string filename = "/1_userinfo_"; - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -1522,7 +1522,7 @@ TEST_F(MDSClientTest, CompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::FAILED, mdsclient_.CompleteCloneFile("destination", userinfo)); - // 认证失败 + // Authentication failed curve::mds::SetCloneFileStatusResponse response1; response1.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -1533,7 +1533,7 @@ TEST_F(MDSClientTest, CompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::AUTHFAIL, mdsclient_.CompleteCloneFile("destination", userinfo)); - // 请求成功 + // Request successful curve::mds::SetCloneFileStatusResponse response2; response2.set_statuscode(::curve::mds::StatusCode::kOK); @@ -1608,7 +1608,7 @@ TEST_F(MDSClientTest, ChangeOwner) { ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->ChangeOwner(filename1, "newowner", userinfo)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -1796,7 +1796,7 @@ TEST_F(MDSClientTest, ListDir) { ASSERT_EQ(-1 * LIBCURVE_ERROR::INTERNAL_ERROR, globalclient->Listdir(filename1, userinfo, &filestatVec)); - // 设置rpc失败,触发重试 + // Failed to set rpc, triggering retry brpc::Controller cntl; cntl.SetFailed(-1, "failed"); @@ -1859,7 +1859,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { using GetLeaderResponse2 = curve::chunkserver::GetLeaderResponse2; void SetUp() override { - // 添加service,并启动server + // Add a service and start the server for (int i = 0; i < kChunkServerNum; ++i) { auto &chunkserver = chunkServers[i]; auto &fakeCliService = fakeCliServices[i]; @@ -1886,7 +1886,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { externalAddrs[i] = PeerAddr(endpoint); } - // 设置copyset peer信息 + //Set copyset peer information for (int i = 0; i < kChunkServerNum; ++i) { curve::client::CopysetPeerInfo peerinfo; peerinfo.peerID = i + 1; @@ -1971,7 +1971,7 @@ class ServiceHelperGetLeaderTest : public MDSClientTest { }; TEST_F(ServiceHelperGetLeaderTest, NormalTest) { - // 测试复制组里第一个chunkserver为leader + // Test the first chunkserver in the replication group as the leader GetLeaderResponse2 response = MakeResponse(internalAddrs[0]); FakeReturn fakeret0(nullptr, static_cast(&response)); @@ -1993,7 +1993,7 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { ResetAllFakeCliService(); - // 测试第二次拉取新的leader,直接跳过第一个chunkserver,查找第2,3两个 + // Test pulling a new leader for the second time, skip the first chunkserver directly, and search for the second and third two int32_t currentLeaderIndex = 0; curve::client::PeerAddr currentLeader = internalAddrs[currentLeaderIndex]; @@ -2012,7 +2012,7 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { ResetAllFakeCliService(); - // 测试第三次获取leader,会跳过第二个chunkserver,重试1/3 + // Testing for the third time obtaining the leader will skip the second chunkserver and retry 1/3 
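// The three GetLeader steps above all follow the same rule: when probing for a
// new leader, the peer currently cached as the leader is skipped. A minimal,
// self-contained sketch of that selection rule (PeersToProbe is a hypothetical
// helper written for illustration, not the client's actual ServiceHelper API):
#include <cstddef>
#include <vector>

// Returns the peer indices that should receive a GetLeader probe, i.e. every
// peer except the one currently recorded as leader.
inline std::vector<size_t> PeersToProbe(size_t peerCount, size_t currentLeaderIndex) {
    std::vector<size_t> targets;
    for (size_t i = 0; i < peerCount; ++i) {
        if (i != currentLeaderIndex) {
            targets.push_back(i);  // e.g. leader index 1 -> probe peers 0 and 2
        }
    }
    return targets;
}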
currentLeaderIndex = 1; currentLeader = internalAddrs[currentLeaderIndex]; @@ -2034,13 +2034,13 @@ TEST_F(ServiceHelperGetLeaderTest, NormalTest) { } TEST_F(ServiceHelperGetLeaderTest, RpcDelayTest) { - // 设置第三个chunkserver为leader + //Set the third chunkserver as the leader const auto currentLeaderIndex = 2; const auto ¤tLeader = internalAddrs[2]; SetGetLeaderResponse(currentLeader); - // 再次GetLeader会向chunkserver 1/2 发送请求 - // 在chunksever GetLeader service 中加入sleep,触发backup request + //GetLeader will send a request to chunkserver 1/2 again + // Add a sleep in the chunksever GetLeader service to trigger a backup request fakeCliServices[0].SetDelayMs(200); fakeCliServices[1].SetDelayMs(200); @@ -2063,15 +2063,15 @@ TEST_F(ServiceHelperGetLeaderTest, RpcDelayAndExceptionTest) { std::vector exceptionErrCodes{ENOENT, EAGAIN, EHOSTDOWN, ECONNREFUSED, ECONNRESET, brpc::ELOGOFF}; - // 设置第三个chunkserver为leader,GetLeader会向chunkserver 1/2发送请求 + //Set the third chunkserver as the leader, and GetLeader will send a request to chunkserver 1/2 const auto currentLeaderIndex = 2; const auto ¤tLeader = internalAddrs[currentLeaderIndex]; SetGetLeaderResponse(currentLeader); - // 设置第一个chunkserver GetLeader service 延迟 + //Set the delay for the first chunkserver GetLeader service fakeCliServices[0].SetDelayMs(200); - // 设置第二个chunkserver 返回对应的错误码 + //Set the second chunkserver to return the corresponding error code for (auto errCode : exceptionErrCodes) { fakeCliServices[1].SetErrorCode(errCode); brpc::Controller controller; @@ -2105,13 +2105,13 @@ TEST_F(ServiceHelperGetLeaderTest, AllChunkServerExceptionTest) { std::vector exceptionErrCodes{ENOENT, EAGAIN, EHOSTDOWN, ECONNREFUSED, ECONNRESET, brpc::ELOGOFF}; - // 设置第三个chunkserver为leader + //Set the third chunkserver as the leader const auto currentLeaderIndex = 2; const auto ¤tLeader = internalAddrs[currentLeaderIndex]; SetGetLeaderResponse(currentLeader); - // 另外两个chunkserver都返回对应的错误码 + // The other two chunkservers both return corresponding error codes for (auto errCode : exceptionErrCodes) { fakeCliServices[0].SetErrorCode(errCode); fakeCliServices[1].SetErrorCode(errCode); diff --git a/test/client/client_metric_test.cpp b/test/client/client_metric_test.cpp index a0704c5b58..bc3d50af88 100644 --- a/test/client/client_metric_test.cpp +++ b/test/client/client_metric_test.cpp @@ -83,14 +83,14 @@ TEST(MetricTest, ChunkServer_MetricTest) { ClientConfig cc; ASSERT_EQ(0, cc.Init(configpath.c_str())); - // filename必须是全路径 + //The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + //Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -147,13 +147,13 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + //Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + //Read write timeout retry mds.EnableNetUnstable(8000); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -166,7 +166,7 @@ TEST(MetricTest, ChunkServer_MetricTest) { ASSERT_EQ(-2, ret); - // 4次正确读写,4次超时读写,超时会引起重试,重试次数为3,数据量最大是8192 + //4 correct reads and writes, 4 timeout reads and writes, timeout will cause retries, retry count is 3, and the maximum data volume is 8192 ASSERT_EQ(fm->inflightRPCNum.get_value(), 0); 
ASSERT_EQ(fm->userRead.qps.count.get_value(), 2); ASSERT_EQ(fm->userWrite.qps.count.get_value(), 2); @@ -214,14 +214,14 @@ TEST(MetricTest, SuspendRPC_MetricTest) { FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT - // filename必须是全路径 + //The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + //Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -266,13 +266,13 @@ TEST(MetricTest, SuspendRPC_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + //Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + //Read write timeout retry mds.EnableNetUnstable(100); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 5606204b83..0a124a520f 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -80,7 +80,7 @@ TEST(ClientSession, LeaseTaskTest) { FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid curve::client::EndPoint ep; butil::str2endpoint("127.0.0.1", 9176, &ep); PeerId pd(ep); diff --git a/test/client/client_unstable_helper_test.cpp b/test/client/client_unstable_helper_test.cpp index cbb62891a5..b49de63725 100644 --- a/test/client/client_unstable_helper_test.cpp +++ b/test/client/client_unstable_helper_test.cpp @@ -48,7 +48,7 @@ TEST(UnstableHelperTest, normal_test) { chunkservers.emplace_back(std::make_pair(i, ep)); } - // 先对每个chunkserver进行10次连续超时 + //First, perform 10 consecutive timeouts on each chunkserver for (const auto& cs : chunkservers) { for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(cs.first); @@ -59,8 +59,8 @@ TEST(UnstableHelperTest, normal_test) { } } - // 再对每个chunkserver增加一次超时 - // 前两个是chunkserver unstable状态,第三个是server unstable + //Add another timeout to each chunkserver + //The first two are in the chunkserver unstable state, and the third is in the server unstable state helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, helper.GetCurrentUnstableState( @@ -76,8 +76,8 @@ TEST(UnstableHelperTest, normal_test) { helper.GetCurrentUnstableState( chunkservers[2].first, chunkservers[2].second)); - // 继续增加超时次数 - // 这种情况下,每次都是chunkserver unstable + //Continue to increase the number of timeouts + //In this case, it is always chunkserver unstable helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, helper.GetCurrentUnstableState( @@ -91,7 +91,7 @@ TEST(UnstableHelperTest, normal_test) { helper.GetCurrentUnstableState( chunkservers[2].first, chunkservers[2].second)); - // 新chunkserver第一次超时,根据ip判断,可以直接设置为chunkserver unstable + //The first timeout of a new chunkserver can be directly set to chunkserver unstable based on the IP address butil::EndPoint ep; butil::str2endpoint("127.100.0.1:60999", &ep); auto chunkserver4 = std::make_pair(4, ep); @@ -102,7 +102,7 @@ TEST(UnstableHelperTest, normal_test) { helper.GetCurrentUnstableState( chunkserver4.first, chunkserver4.second)); - // 其他ip的chunkserver + //Chunkservers for other IPs butil::str2endpoint("127.200.0.1:60999", &ep); auto chunkserver5 = std::make_pair(5, ep); for (int i = 
1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { diff --git a/test/client/client_userinfo_unittest.cpp b/test/client/client_userinfo_unittest.cpp index 6153f23e5e..4f8c942934 100644 --- a/test/client/client_userinfo_unittest.cpp +++ b/test/client/client_userinfo_unittest.cpp @@ -341,7 +341,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { delete delfakeret; } -// root user测试 +// Root user testing TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ClientConfigOption opt; opt.metaServerOpt.rpcRetryOpt.rpcTimeoutMs = 500; diff --git a/test/client/copyset_client_test.cpp b/test/client/copyset_client_test.cpp index b71383ec9d..a5efda523f 100644 --- a/test/client/copyset_client_test.cpp +++ b/test/client/copyset_client_test.cpp @@ -79,7 +79,7 @@ class CopysetClientTest : public testing::Test { brpc::Server *server_; }; -/* TODO(wudemiao) 当前 controller 错误不能通过 mock 返回 */ +/* TODO(wudemiao) current controller error cannot be returned through mock */ int gWriteCntlFailedCode = 0; int gReadCntlFailedCode = 0; @@ -465,7 +465,7 @@ TEST_F(CopysetClientTest, write_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -507,7 +507,7 @@ TEST_F(CopysetClientTest, write_error_test) { reqCtx->offset_ = 0; reqCtx->rawlength_ = len; - // 配置文件设置的重试睡眠时间为5000,因为没有触发底层指数退避,所以重试之间不会睡眠 + // The retry sleep time set in the configuration file is 5000, as there is no triggering of underlying index backoff, so there will be no sleep between retries uint64_t start = TimeUtility::GetTimeofDayUs(); curve::common::CountDownEvent cond(1); @@ -547,10 +547,10 @@ TEST_F(CopysetClientTest, write_error_test) { reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试超时时间为5000,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*1000 - // 但是增加指数退避之后,超时时间将增加到1000 + 2000 + 2000 = 5000 - // 加上随机因子,三次重试时间应该大于7000, 且小于8000 + // The retry timeout set by the configuration file is 5000 because the chunkserver setting returns timeout + // Causing the triggering of an exponential backoff of the underlying timeout time, increasing the interval between each retry. Retrying three times is normal, only 3 * 1000 sleep is required + // But after increasing the index backoff, the timeout will increase to 1000+2000+2000=5000 + // Adding random factors, the three retry times should be greater than 7000 and less than 8000 uint64_t start = TimeUtility::GetTimeofDayMs(); reqCtx->done_ = reqDone; @@ -590,10 +590,10 @@ TEST_F(CopysetClientTest, write_error_test) { reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试睡眠时间为5000,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*5000 - // 但是增加指数退避之后,睡眠间隔将增加到10000 + 20000 = 30000 - // 加上随机因子,三次重试时间应该大于29000, 且小于50000 + // The retry sleep time set in the configuration file is 5000 because the chunkserver setting returns timeout + // Causing triggering of low-level exponential backoff, increasing the interval between each retry. 
Retrying three times is normal, only 3 * 5000 sleep is required + // But after increasing the index retreat, the sleep interval will increase to 10000+20000=30000 + // Adding random factors, the three retry times should be greater than 29000 and less than 50000 uint64_t start = TimeUtility::GetTimeofDayUs(); reqCtx->done_ = reqDone; @@ -618,7 +618,7 @@ TEST_F(CopysetClientTest, write_error_test) { gWriteCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -649,7 +649,7 @@ TEST_F(CopysetClientTest, write_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -689,7 +689,7 @@ TEST_F(CopysetClientTest, write_error_test) { ASSERT_EQ(1, fm.writeRPC.redirectQps.count.get_value()); } - /* 不是 leader,没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, did not return a leader, refreshing the meta cache succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -726,7 +726,7 @@ TEST_F(CopysetClientTest, write_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, did not return a leader, refreshing the meta cache failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -766,7 +766,7 @@ TEST_F(CopysetClientTest, write_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -803,14 +803,14 @@ TEST_F(CopysetClientTest, write_error_test) { auto elpased = curve::common::TimeUtility::GetTimeofDayUs() - startTimeUs; // chunkserverOPRetryIntervalUS = 5000 - // 每次redirect睡眠500us,共重试2次(chunkserverOPMaxRetry=3,判断时大于等于就返回,所以共只重试了两次) - // 所以总共耗费时间大于1000us + // redirect sleep for 500us each time and retry a total of 2 times (chunkserverOPMaxRetry=3, returns if it is greater than or equal to, so only two retries were made) + // So the total time spent is greater than 1000us ASSERT_GE(elpased, 1000); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); ASSERT_EQ(3, fm.writeRPC.redirectQps.count.get_value()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -842,7 +842,7 @@ TEST_F(CopysetClientTest, write_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -981,9 +981,9 @@ TEST_F(CopysetClientTest, write_failed_test) { reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要超时49*500 - // 但是增加指数退避之后,超时时间将增加到49*1000 = 49000 + // The retry timeout set by the configuration file is 500 because the chunkserver setting returns timeout + // Causing the triggering of an exponential backoff of the underlying timeout time, increasing the interval 
between each retry. Retrying 50 times normally only requires a timeout of 49 * 500 + // But after increasing the index backoff, the timeout will increase to 49 * 1000=49000 uint64_t start = TimeUtility::GetTimeofDayMs(); reqCtx->done_ = reqDone; @@ -1022,9 +1022,9 @@ TEST_F(CopysetClientTest, write_failed_test) { reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000us - // 但是增加指数退避之后,睡眠间隔将增加到 + // The retry sleep time set in the configuration file is 5000us because the chunkserver setting returns timeout + // Causing triggering of low-level exponential backoff, increasing the interval between each retry. Retrying 50 times normally only requires 49 * 5000us of sleep + // But after increasing the index of retreat, the sleep interval will increase to // 10000 + 20000 + 40000... ~= 4650000 uint64_t start = TimeUtility::GetTimeofDayUs(); @@ -1113,9 +1113,9 @@ TEST_F(CopysetClientTest, read_failed_test) { reqCtx->offset_ = 0; reqCtx->rawlength_ = len; - // 配置文件设置的重试超时时间为500,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试50次正常只需要50*500 - // 但是增加指数退避之后,超时时间将增加到500 + 1000 + 2000... ~= 60000 + // The retry timeout set by the configuration file is 500 because the chunkserver setting returns timeout + // Causing the triggering of an exponential backoff of the underlying timeout time, increasing the interval between each retry. Retrying 50 times normally only requires 50 * 500 + // But after increasing the index retreat, the timeout will increase to 500+1000+2000...~=60000 uint64_t start = TimeUtility::GetTimeofDayMs(); curve::common::CountDownEvent cond(1); @@ -1146,7 +1146,7 @@ TEST_F(CopysetClientTest, read_failed_test) { gReadCntlFailedCode = 0; } - /* 设置 overload */ + /* Set overload */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1161,11 +1161,11 @@ TEST_F(CopysetClientTest, read_failed_test) { reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试睡眠时间为5000us,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试50次正常只需要睡眠49*5000 - // 但是增加指数退避之后,睡眠间隔将增加到 + // The retry sleep time set in the configuration file is 5000us because the chunkserver setting returns timeout + // Causing triggering of low-level exponential backoff, increasing the interval between each retry. Retrying 50 times is normal, only requiring 49 * 5000 sleep + // But after increasing the index of retreat, the sleep interval will increase to // 10000 + 20000 + 40000 ... 
= 4650000 - // 加上随机因子,三次重试时间应该大于2900, 且小于5000 + // Adding random factors, the three retry times should be greater than 2900 and less than 5000 uint64_t start = TimeUtility::GetTimeofDayUs(); reqCtx->done_ = reqDone; @@ -1242,7 +1242,7 @@ TEST_F(CopysetClientTest, read_error_test) { IOTracker iot(nullptr, nullptr, nullptr, &fm); iot.PrepareReadIOBuffers(1); - /* 非法参数 */ + /* Illegal parameter */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1313,7 +1313,7 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->offset_ = 0; reqCtx->rawlength_ = len; - // 配置文件设置的重试睡眠时间为5000,因为没有触发底层指数退避,所以重试之间不会睡眠 + // The retry sleep time set in the configuration file is 5000, as there is no triggering of underlying index backoff, so there will be no sleep between retries uint64_t start = TimeUtility::GetTimeofDayUs(); curve::common::CountDownEvent cond(1); @@ -1350,10 +1350,10 @@ TEST_F(CopysetClientTest, read_error_test) { reqCtx->offset_ = 0; reqCtx->rawlength_ = len; - // 配置文件设置的超时时间为1000,因为chunkserver设置返回timeout - // 导致触发底层超时时间指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*1000 - // 但是增加指数退避之后,超时时间将增加到1000 + 2000 + 2000 = 5000 - // 加上随机因子,三次重试时间应该大于7000, 且小于8000 + // The timeout configured in the settings file is 1000, but due to chunk server timeout, it triggers exponential backoff, increasing the interval between retries. In normal conditions, + // three retries would only require a sleep time of 3 * 1000. However, with the added exponential + // backoff, the timeout intervals will increase to 1000 + 2000 + 2000 = 5000. Considering the random factor, + // the total time for three retries should be greater than 7000 and less than 8000. uint64_t start = TimeUtility::GetTimeofDayMs(); curve::common::CountDownEvent cond(1); @@ -1384,7 +1384,7 @@ TEST_F(CopysetClientTest, read_error_test) { gReadCntlFailedCode = 0; } - /* 设置 overload */ + /* Set overload */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1399,10 +1399,10 @@ TEST_F(CopysetClientTest, read_error_test) { reqDone->SetFileMetric(&fm); reqDone->SetIOTracker(&iot); - // 配置文件设置的重试睡眠时间为500,因为chunkserver设置返回timeout - // 导致触发底层指数退避,每次重试间隔增大。重试三次正常只需要睡眠3*500 - // 但是增加指数退避之后,睡眠间隔将增加到1000 + 2000 = 3000 - // 加上随机因子,三次重试时间应该大于2900, 且小于5000 + // The retry sleep time set in the configuration file is 500, but due to chunkserver timeouts, it triggers exponential backoff, increasing the interval between retries. In normal conditions, + // three retries would only require a sleep time of 3 * 500. However, with the added exponential + // backoff, the sleep intervals will increase to 1000 + 2000 = 3000. Considering the random factor, + // the total time for three retries should be greater than 2900 and less than 5000. 
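// The timing bounds asserted in these retry cases come from a capped exponential
// backoff. A minimal sketch of that arithmetic only (the concrete numbers mirror
// the comments above; TotalBackoff is a hypothetical helper, not the client's
// actual retry-option handling):
#include <algorithm>
#include <cstdint>

// Sums the per-retry interval when each retry doubles the previous one,
// capped at 'cap'.
inline uint64_t TotalBackoff(uint64_t base, uint64_t cap, int retries) {
    uint64_t total = 0;
    uint64_t current = base;
    for (int i = 0; i < retries; ++i) {
        total += current;
        current = std::min(current * 2, cap);
    }
    return total;
}
// TotalBackoff(1000, 2000, 3) == 1000 + 2000 + 2000 == 5000, the "grows to 5000"
// case described above; without backoff, three retries would only cost 3 * 1000.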
uint64_t start = TimeUtility::GetTimeofDayUs(); reqCtx->done_ = reqDone; @@ -1426,7 +1426,7 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_LT(end - start, 3 * 5000); } - /* 其他错误 */ + /* Other errors */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1457,7 +1457,7 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1495,7 +1495,7 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1537,7 +1537,7 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1579,7 +1579,7 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1613,7 +1613,7 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1645,7 +1645,7 @@ TEST_F(CopysetClientTest, read_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ; @@ -1732,7 +1732,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -1826,7 +1826,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -1858,7 +1858,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -1897,7 +1897,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); 
reqCtx->optype_ = OpType::READ_SNAP; @@ -1933,7 +1933,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -1973,7 +1973,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -2008,7 +2008,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -2041,7 +2041,7 @@ TEST_F(CopysetClientTest, read_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -2148,7 +2148,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2208,7 +2208,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2239,7 +2239,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2280,7 +2280,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2315,7 +2315,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2356,7 +2356,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2390,7 +2390,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2422,7 +2422,7 @@ TEST_F(CopysetClientTest, delete_snapshot_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2531,7 +2531,7 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::CREATE_CLONE; @@ -2589,7 +2589,7 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - // /* 其他错误 */ + // /* Other errors */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2653,7 +2653,7 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2687,7 +2687,7 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2725,7 +2725,7 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2758,7 +2758,7 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2789,7 +2789,7 @@ TEST_F(CopysetClientTest, create_s3_clone_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2899,7 +2899,7 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2955,7 +2955,7 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -2984,7 +2984,7 @@ 
TEST_F(CopysetClientTest, recover_chunk_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -3016,7 +3016,7 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -3055,7 +3055,7 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -3092,7 +3092,7 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -3124,7 +3124,7 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -3154,7 +3154,7 @@ TEST_F(CopysetClientTest, recover_chunk_error_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -3254,7 +3254,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { FileMetric fm("test"); IOTracker iot(nullptr, nullptr, nullptr, &fm); - /* 非法参数 */ + /* Illegal parameter */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3306,7 +3306,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { ASSERT_NE(0, reqDone->GetErrorCode()); gReadCntlFailedCode = 0; } - /* 其他错误 */ + /* Other errors */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3333,7 +3333,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN, reqDone->GetErrorCode()); } - /* 不是 leader,返回正确的 leader */ + /* Not a leader, returning the correct leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3367,7 +3367,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 meta cache 成功 */ + /* Not a leader, but did not return a leader, refreshing the meta cache succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3398,7 +3398,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但是没有返回 leader,刷新 
meta cache 失败 */ + /* Not a leader, but did not return a leader, refreshing the meta cache failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3433,7 +3433,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } - /* 不是 leader,但返回的是错误 leader */ + /* Not a leader, but returned an incorrect leader */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3463,7 +3463,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 依然失败 */ + /* copyset does not exist, updating leader still failed */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3491,7 +3491,7 @@ TEST_F(CopysetClientTest, get_chunk_info_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, reqDone->GetErrorCode()); } - /* copyset 不存在,更新 leader 成功 */ + /* copyset does not exist, updating leader succeeded */ { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::GET_CHUNK_INFO; @@ -3620,7 +3620,7 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { // create fake chunkserver service FakeChunkServerService fakechunkservice; - // 设置cli服务 + // Set up cli service CliServiceFake fakeCliservice; FakeCurveFSService curvefsService; @@ -3670,10 +3670,10 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { ASSERT_EQ(LIBCURVE_ERROR::OK, fileinstance.Open()); - // 设置文件版本号 + // Set file version number fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); - // 发送写请求,并等待sec秒后检查io是否返回 + // Send a write request and wait for seconds to check if IO returns auto startWriteAndCheckResult = [&fileinstance](int sec)-> bool { // NOLINT CurveAioContext* aioctx = new CurveAioContext(); char buffer[4096]; @@ -3684,29 +3684,29 @@ TEST(ChunkServerBackwardTest, ChunkServerBackwardTest) { aioctx->op = LIBCURVE_OP::LIBCURVE_OP_WRITE; aioctx->cb = WriteCallBack; - // 下发写请求 + // Send write request fileinstance.AioWrite(aioctx, UserDataType::RawBuffer); std::this_thread::sleep_for(std::chrono::seconds(sec)); return gWriteSuccessFlag; }; - // 第一次写成功,并更新chunkserver端的文件版本号 + // Successfully written for the first time and updated the file version number on the chunkserver side ASSERT_TRUE(startWriteAndCheckResult(3)); - // 设置一个旧的版本号去写 + // Set an old version number to write fileinstance.GetIOManager4File()->SetLatestFileSn(kOldFileSn); gWriteSuccessFlag = false; - // chunkserver返回backward,重新获取版本号后还是旧的版本 + // chunkserver returns the feedback, and after obtaining the version number again, it is still the old version // IO hang ASSERT_FALSE(startWriteAndCheckResult(3)); - // 更新版本号为正常状态 + // Update version number to normal state fileinstance.GetIOManager4File()->SetLatestFileSn(kNewFileSn); std::this_thread::sleep_for(std::chrono::seconds(1)); - // 上次写请求成功 + // Last write request successful ASSERT_EQ(true, gWriteSuccessFlag); server.Stop(0); @@ -3763,8 +3763,8 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { IOTracker iot(nullptr, nullptr, nullptr, &fm); { - // redirect情况下, chunkserver返回新的leader - // 重试之前不会睡眠 + // In the redirect case, chunkserver returns a new leader + // Will not sleep until retry RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3809,15 +3809,15 @@ 
TEST_F(CopysetClientTest, retry_rpc_sleep_test) { cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回新的leader id,所以重试之前不会进行睡眠 + // Returns a new leader ID, so there will be no sleep before retrying ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - // redirect情况下,chunkserver返回旧leader - // 重试之前会睡眠 + // In the redirect case, chunkserver returns the old leader + // Sleep before retrying RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3859,15 +3859,15 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回同样的leader id,重试之前会进行睡眠 + // Return the same leader ID and sleep before retrying ASSERT_GE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - // redirect情况下,chunkserver未返回leader - // 主动refresh获取到新leader + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain a new leader RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3908,15 +3908,15 @@ TEST_F(CopysetClientTest, retry_rpc_sleep_test) { cond.Wait(); auto endUs = curve::common::TimeUtility::GetTimeofDayUs(); - // 返回新的leader id,所以重试之前不会进行睡眠 + // Returns a new leader id, so there will be no sleep before retrying ASSERT_LE(endUs - startUs, sleepUsBeforeRetry / 10); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, reqDone->GetErrorCode()); } { - // redirect情况下,chunkserver未返回leader - // 主动refresh获取到旧leader + // In the redirect case, chunkserver did not return a leader + // Actively refresh to obtain old leader RequestContext* reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; reqCtx->idinfo_ = ChunkIDInfo(chunkId, logicPoolId, copysetId); @@ -3978,7 +3978,7 @@ class TestRunnedRequestClosure : public RequestClosure { bool runned_ = false; }; -// 测试session失效后,重试请求会被重新放入请求队列 +// After the test session fails, the retry request will be placed back in the request queue TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { MockRequestScheduler requestScheduler; CopysetClient copysetClient; @@ -3988,7 +3988,7 @@ TEST(CopysetClientBasicTest, TestReScheduleWhenSessionNotValid) { ASSERT_EQ(0, copysetClient.Init(&metaCache, ioSenderOption, &requestScheduler, nullptr)); - // 设置session not valid + // Set session not valid copysetClient.StartRecycleRetryRPC(); { diff --git a/test/client/fake/client_workflow_test.cpp b/test/client/fake/client_workflow_test.cpp index c42a9371ba..3884e528a8 100644 --- a/test/client/fake/client_workflow_test.cpp +++ b/test/client/fake/client_workflow_test.cpp @@ -76,7 +76,7 @@ int main(int argc, char ** argv) { LOG(FATAL) << "Fail to init config"; } - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // uint64_t size = FLAGS_test_disk_size; @@ -86,7 +86,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); diff --git a/test/client/fake/client_workflow_test4snap.cpp b/test/client/fake/client_workflow_test4snap.cpp index 9aa9a75e23..eaea4d376a 100644 --- 
a/test/client/fake/client_workflow_test4snap.cpp +++ b/test/client/fake/client_workflow_test4snap.cpp @@ -79,7 +79,7 @@ int main(int argc, char ** argv) { mds.Initialize(); mds.StartService(); if (FLAGS_create_copysets) { - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 8200, &ep); PeerId pd(ep); diff --git a/test/client/fake/fakeChunkserver.h b/test/client/fake/fakeChunkserver.h index 6ebbbeffcf..8cb93d84c4 100644 --- a/test/client/fake/fakeChunkserver.h +++ b/test/client/fake/fakeChunkserver.h @@ -209,7 +209,7 @@ class FakeChunkService : public ChunkService { } private: - // wait4netunstable用来模拟网络延时,当打开之后,每个读写rpc会停留一段时间再返回 + // wait4netunstable is used to simulate network latency. When turned on, each read/write rpc pauses for a period of time before returning bool wait4netunstable; uint64_t waittimeMS; bool rpcFailed; diff --git a/test/client/fake/fakeMDS.h b/test/client/fake/fakeMDS.h index e29f251c26..9410eec31d 100644 --- a/test/client/fake/fakeMDS.h +++ b/test/client/fake/fakeMDS.h @@ -187,7 +187,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { } if (!strcmp(request->owner().c_str(), "root")) { - // 当user为root用户的时候需要检查其signature信息 + // When the user is root, its signature information needs to be checked std::string str2sig = Authenticator::GetString2Signature( request->date(), request->owner()); @@ -199,7 +199,7 @@ class FakeMDSCurveFSService : public curve::mds::CurveFSService { retrytimes_++; - // 检查请求内容是全路径 + // Check that the request content is a full path auto checkFullpath = [&]() { LOG(INFO) << "request filename = " << request->filename(); ASSERT_EQ(request->filename()[0], '/'); diff --git a/test/client/inflight_rpc_control_test.cpp b/test/client/inflight_rpc_control_test.cpp index 8d6d4de1ee..c21a888abf 100644 --- a/test/client/inflight_rpc_control_test.cpp +++ b/test/client/inflight_rpc_control_test.cpp @@ -72,7 +72,7 @@ TEST(InflightRPCTest, TestInflightRPC) { int maxInflightNum = 8; { - // 测试inflight数量 + // Test the inflight count InflightControl control; control.SetMaxInflightNum(maxInflightNum); ASSERT_EQ(0, control.GetCurrentInflightNum()); @@ -89,7 +89,7 @@ TEST(InflightRPCTest, TestInflightRPC) { } { - // 测试GetInflightTokan与ReleaseInflightToken的并发 + // Test the concurrency of GetInflightToken and ReleaseInflightToken InflightControl control; control.SetMaxInflightNum(maxInflightNum); @@ -123,7 +123,7 @@ TEST(InflightRPCTest, TestInflightRPC) { } { - // 测试WaitInflightAllComeBack + // Test WaitInflightAllComeBack InflightControl control; control.SetMaxInflightNum(maxInflightNum); for (int i = 1; i <= maxInflightNum; ++i) { @@ -148,13 +148,13 @@ } TEST(InflightRPCTest, FileCloseTest) { - // 测试在文件关闭的时候,lese续约失败不会调用iomanager已析构的资源 - // lease时长10s,在lease期间仅续约一次,一次失败就会调用iomanager - // block IO,这时候其实调用的是scheduler的LeaseTimeoutBlockIO +// Test that when the lease renewal fails at the time of file closure, it does not invoke the already destructed resources of the IO manager. +// The lease duration is 10 seconds, and only one renewal is attempted during the lease period. +// If the renewal fails, it triggers the IO manager's block IO, which actually calls the LeaseTimeoutBlockIO of the scheduler.
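// The TestInflightRPC cases above exercise a counting gate: acquire a token
// before sending, release it on completion, and optionally wait until nothing
// is in flight. A minimal, self-contained sketch of such a gate (an
// illustrative class, not the client's actual InflightControl implementation):
#include <condition_variable>
#include <cstdint>
#include <mutex>

class SimpleInflightGate {
 public:
    explicit SimpleInflightGate(uint64_t maxInflight) : maxInflight_(maxInflight) {}

    void GetToken() {                 // blocks while the inflight limit is reached
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return inflight_ < maxInflight_; });
        ++inflight_;
    }

    void ReleaseToken() {             // wakes both GetToken and WaitAllComeBack waiters
        std::lock_guard<std::mutex> lk(mtx_);
        --inflight_;
        cv_.notify_all();
    }

    void WaitAllComeBack() {          // returns once every in-flight request is back
        std::unique_lock<std::mutex> lk(mtx_);
        cv_.wait(lk, [this] { return inflight_ == 0; });
    }

 private:
    std::mutex mtx_;
    std::condition_variable cv_;
    const uint64_t maxInflight_;
    uint64_t inflight_ = 0;
};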
IOOption ioOption; ioOption.reqSchdulerOpt.ioSenderOpt.failRequestOpt.chunkserverRPCTimeoutMS = 10000; - // 设置inflight RPC最大数量为1 + // Set the maximum number of inflight RPCs to 1 ioOption.ioSenderOpt.inflightOpt.fileMaxInFlightRPCNum = 1; std::condition_variable cv; @@ -200,7 +200,7 @@ TEST(InflightRPCTest, FileCloseTest) { LeaseExecutor lease(lopt, userinfo, nullptr, iomanager); for (int j = 0; j < 5; j++) { - // 测试iomanager退出之后,lease再去调用其scheduler资源不会crash + // Test that after the iomanager has exited, the lease calling its scheduler resources again does not crash lease.InvalidLease(); } @@ -214,11 +214,10 @@ TEST(InflightRPCTest, FileCloseTest) { } }; - // 并发两个线程,一个线程启动iomanager初始化,然后反初始化 - // 另一个线程启动lease续约,然后调用iomanager使其block IO - // 预期:并发两个线程,lease线程续约失败即使在iomanager线程 - // 退出的同时去调用其block IO接口也不会出现并发竞争共享资源的 - // 场景。 + // Concurrently run two threads: one thread initializes the IO manager and then deinitializes it, + // while the other thread initiates lease renewal and then calls the IO manager to make it block IO. + // Expectation: Concurrent execution of the two threads should not result in concurrent competition + // for shared resources, even if the lease thread fails to renew while the IO manager thread exits. std::thread t1(f1); std::thread t2(f2); diff --git a/test/client/iotracker_splitor_unittest.cpp b/test/client/iotracker_splitor_unittest.cpp index 1f423250fa..85a10c9bf5 100644 --- a/test/client/iotracker_splitor_unittest.cpp +++ b/test/client/iotracker_splitor_unittest.cpp @@ -184,7 +184,7 @@ class IOTrackerSplitorTest : public ::testing::Test { curvefsservice.SetCloseFile(closefileret); /** - * 3. 设置GetOrAllocateSegmentresponse + * 3. Set the GetOrAllocateSegment response */ curve::mds::GetOrAllocateSegmentResponse* response = new curve::mds::GetOrAllocateSegmentResponse(); @@ -265,7 +265,7 @@ class IOTrackerSplitorTest : public ::testing::Test { curvefsservice.SetRefreshSession(refreshfakeret, nullptr); /** - * 5. 设置topology返回值 + * 5. Set the topology return value */ ::curve::mds::topology::GetChunkServerListInCopySetsResponse* response_1 = new ::curve::mds::topology::GetChunkServerListInCopySetsResponse; @@ -603,8 +603,8 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetSegmentFail) { memset(data + 4 * 1024, 'b', chunk_size); memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024); - // 设置mds一侧get segment接口返回失败,底层task thread层会一直重试, - // 但是不会阻塞上层继续向下发送IO请求 + // Set the get segment interface on the MDS (Metadata Server) side to return failure; the underlying task thread layer will keep retrying. + // However, this will not block the upper layer from continuing to send IO requests downward. int reqcount = 32; auto threadFunc1 = [&]() { while (reqcount > 0) { @@ -636,8 +636,8 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) { ioctxmana->SetRequestScheduler(mockschuler); ioctxmana->SetIOOpt(fopt.ioOpt); - // offset 10*1024*1024*1024ul 不在metacache里 - // client回去mds拿segment和serverlist + // The offset 10*1024*1024*1024ul is not in the metacache. + // The client will fetch the segment and serverlist from the MDS (Metadata Server).
CurveAioContext* aioctx = new CurveAioContext; aioctx->offset = 10*1024*1024*1024ul; aioctx->length = chunk_size + 8 * 1024; @@ -652,8 +652,8 @@ TEST_F(IOTrackerSplitorTest, ManagerAsyncStartWriteReadGetServerlistFail) { memset(data + 4 * 1024, 'b', chunk_size); memset(data + 4 * 1024 + chunk_size, 'c', 4 * 1024); - // 设置mds一侧get server list接口返回失败,底层task thread层会一直重试 - // 但是不会阻塞,上层继续向下发送IO请求 + // If the "get server list" interface on the MDS side is reported as a failure, the underlying task thread layer will keep retrying. + // However, this won't block the process, and the upper layer will continue sending IO requests downstream. int reqcount = 32; auto threadFunc1 = [&]() { while (reqcount > 0) { @@ -969,7 +969,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { cloneSourceInfo.length = 10ull * 1024 * 1024 * 1024; // 10GB cloneSourceInfo.segmentSize = 1ull * 1024 * 1024 * 1024; // 1GB - // 源卷只分配了第一个和最后一个segment + // The source volume has only allocated the first and last segments cloneSourceInfo.allocatedSegmentOffsets.insert(0); cloneSourceInfo.allocatedSegmentOffsets.insert(cloneSourceInfo.length - cloneSourceInfo.segmentSize); @@ -980,14 +980,14 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ChunkIndex chunkIdx = 0; RequestSourceInfo sourceInfo; - // 第一个chunk + // First chunk sourceInfo = Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_TRUE(sourceInfo.IsValid()); ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 克隆卷最后一个chunk + // Clone the last chunk of the volume chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize - 1; LOG(INFO) << "clone length = " << fileInfo.sourceInfo.length << ", chunk size = " << fileInfo.chunksize @@ -1000,8 +1000,8 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_EQ(sourceInfo.cloneFileSource, fileInfo.sourceInfo.name); ASSERT_EQ(sourceInfo.cloneFileOffset, 10720641024); - // 源卷未分配segment - // 读取每个segment的第一个chunk + // Source volume unassigned segment + // Read the first chunk of each segment for (int i = 1; i < 9; ++i) { ChunkIndex chunkIdx = i * cloneSourceInfo.segmentSize / fileInfo.chunksize; @@ -1012,7 +1012,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_EQ(sourceInfo.cloneFileOffset, 0); } - // 超过长度 + // Exceeding length chunkIdx = fileInfo.sourceInfo.length / fileInfo.chunksize; sourceInfo = @@ -1021,7 +1021,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 源卷长度为0 + // Source volume length is 0 chunkIdx = 0; fileInfo.sourceInfo.length = 0; metaCache.UpdateFileInfo(fileInfo); @@ -1031,7 +1031,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { ASSERT_TRUE(sourceInfo.cloneFileSource.empty()); ASSERT_EQ(sourceInfo.cloneFileOffset, 0); - // 不是read/write请求 + // Not a read/write request chunkIdx = 1; ioTracker.SetOpType(OpType::READ_SNAP); sourceInfo = @@ -1045,7 +1045,7 @@ TEST_F(IOTrackerSplitorTest, RequestSourceInfoTest) { chunkIdx = 0; - // 不是克隆卷 + // Not a clone volume sourceInfo = Splitor::CalcRequestSourceInfo(&ioTracker, &metaCache, chunkIdx); ASSERT_FALSE(sourceInfo.IsValid()); diff --git a/test/client/lease_executor_test.cpp b/test/client/lease_executor_test.cpp index 4f5629ad8b..13a75321d4 100644 --- a/test/client/lease_executor_test.cpp +++ b/test/client/lease_executor_test.cpp @@ -16,7 +16,7 @@ /* * Project: curve - * File Created: 2019年11月20日 + * File Created: November 20, 2019 * Author: 
wuhanqing */ diff --git a/test/client/libcbd_libcurve_test.cpp b/test/client/libcbd_libcurve_test.cpp index 3f582b8a3c..63f29c05ae 100644 --- a/test/client/libcbd_libcurve_test.cpp +++ b/test/client/libcbd_libcurve_test.cpp @@ -72,7 +72,7 @@ class TestLibcbdLibcurve : public ::testing::Test { mds_ = new FakeMDS(filename); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9110, &ep); braft::PeerId pd(ep); diff --git a/test/client/libcurve_interface_unittest.cpp b/test/client/libcurve_interface_unittest.cpp index 99d35696b4..d8f828d49d 100644 --- a/test/client/libcurve_interface_unittest.cpp +++ b/test/client/libcurve_interface_unittest.cpp @@ -88,7 +88,7 @@ TEST_F(TestLibcurveInterface, InterfaceTest) { memcpy(userinfo.owner, "userinfo", 9); memcpy(userinfo.password, "", 1); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -244,7 +244,7 @@ TEST_F(TestLibcurveInterface, FileClientTest) { FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); @@ -375,7 +375,7 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { mdsclient_.Initialize(fopt.metaServerOpt); fileinstance_.Initialize("/test", &mdsclient_, userinfo, fopt); - // 设置leaderid + // set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -413,9 +413,9 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // 正常情况下只有第一次会去get leader + // Normally, getting the leader will only occur the first time. ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // LeaderMayChange remains in a normal state for copyset leader that has been written to in metacache. ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -430,17 +430,17 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { } } - // 设置chunkservice返回失败,那么mds每次重试都会去拉新的leader - // 127.0.0.1:9151:0,127.0.0.1:9152:0,127.0.0.1:9153:0是当前集群信息 - // 127.0.0.1:9151对应第一个chunkservice - // 设置rpc失败,会导致client将该chunkserverid上的leader copyset都标记为 + // If chunkservice returns failure, MDS will retry and fetch new leaders each time. + // The current cluster information is: 127.0.0.1:9151:0, 127.0.0.1:9152:0, 127.0.0.1:9153:0. + // 127.0.0.1:9151 corresponds to the first chunkservice. + // An RPC failure causes the client to mark all leader copysets on that chunkserver id as // leadermaychange chunkservice[0]->SetRPCFailed(); // -现在写第二个chunk,第二个chunk与第一个chunk不在同一个copyset里,这次读写失败 +Now, write to the second chunk; as it does not belong to the same copyset as the first chunk, this read and write attempt fails. ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); - // 获取第2个chunk的chunkid信息 + // Obtain chunkid information for the second chunk. 
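// The assertions around this point check which copysets end up with
// LeaderMayChange set after an RPC failure. A minimal sketch of that
// bookkeeping (CopysetEntry and MarkOnRpcFailure are hypothetical illustration
// types, not the client's actual MetaCache API): when an RPC to a chunkserver
// fails, every copyset whose cached leader lives on that chunkserver, plus
// every copyset with no cached leader at all, is flagged so the next request
// refreshes the leader first.
#include <cstdint>
#include <vector>

struct CopysetEntry {
    int currentLeaderIndex = -1;      // -1 means no leader is cached yet
    uint64_t leaderChunkServerId = 0;
    bool leaderMayChange = false;
};

inline void MarkOnRpcFailure(std::vector<CopysetEntry>* copysets, uint64_t failedCsId) {
    for (auto& cs : *copysets) {
        if (cs.currentLeaderIndex == -1 || cs.leaderChunkServerId == failedCsId) {
            cs.leaderMayChange = true;   // force a leader refresh before the next IO
        }
    }
}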
ChunkIDInfo_t chunkinfo2; rc = mc->GetChunkInfoByIndex(1, &chunkinfo2); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -449,32 +449,32 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo1.cpid_ || i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // 这两个leader为该chunkserver的copyset的LeaderMayChange置位 + // Set LeaderMayChange for both of these leaders of the chunkserver's copysets. ASSERT_TRUE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange + // For copysets without current leader information, set LeaderMayChange directly. ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } chunkservice[0]->ReSetRPCFailed(); - // 再次写第二个chunk,这时候获取leader成功后,会将LeaderMayChange置位fasle - // 第一个chunk对应的copyset依然LeaderMayChange为true + // Write to the second chunk again; after successfully obtaining a leader, LeaderMayChange will be set to false. + // LeaderMayChange for the copyset corresponding to the first chunk remains true. ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length)); for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // Set LeaderMayChange for copyset2. ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange保持原有状态 + // LeaderMayChange for copyset1 remains unchanged. ASSERT_TRUE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange + // For copysets without current leader information, set LeaderMayChange directly. ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } @@ -485,33 +485,33 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { butil::str2endpoint("127.0.0.1", 9152, &ep2); PeerId pd2(ep2); cliservice->SetPeerID(pd2); - // 设置rpc失败,迫使copyset切换leader,切换leader后读写成功 + // Force an RPC failure to trigger copyset leader switch; successful read and write after leader switch. chunkservice[0]->SetRPCFailed(); - // 读写第一个和第二个chunk + // Read and write to the first and second chunks. ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 0 * chunk_size, length)); ASSERT_EQ(1, cliservice->GetInvokeTimes()); - // 这个时候 + // At this point for (int i = 0; i < FLAGS_copyset_num; i++) { CopysetPeerInfo ci = mc->GetCopysetinfo(FLAGS_logic_pool_id, i); if (i == chunkinfo2.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset2的LeaderMayChange置位 + // Set LeaderMayChange for copyset2 ASSERT_FALSE(ci.LeaderMayChange()); } else if (i == chunkinfo1.cpid_) { ASSERT_NE(-1, ci.GetCurrentLeaderIndex()); - // copyset1的LeaderMayChange置位 + // Set LeaderMayChange for copyset1 ASSERT_FALSE(ci.LeaderMayChange()); } else { - // 对于当前copyset没有leader信息的就直接置位LeaderMayChange + // For the current copyset without leader information, directly set LeaderMayChange ASSERT_EQ(-1, ci.GetCurrentLeaderIndex()); ASSERT_TRUE(ci.LeaderMayChange()); } } - // 验证copyset id信息更新 + // Verify the update of copyset ID information. 
// copyset id = 888, chunkserver id = 100 101 102 // copyset id = 999, chunkserver id = 102 103 104 CopysetPeerInfo csinfo1; @@ -568,8 +568,8 @@ TEST(TestLibcurveInterface, ChunkserverUnstableTest) { curve::client::CopysetPeerInfo peer9(103, addr); csinfo3.csinfos_.push_back(peer9); - // 更新copyset信息,chunkserver 104的信息被清除 - // 100,和 101上添加了新的copyset信息 + // Update copyset information, clearing the information for chunkserver 104. + // New copyset information has been added on chunk servers 100 and 101. mc->UpdateChunkserverCopysetInfo(FLAGS_logic_pool_id, csinfo3); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 888)); ASSERT_TRUE(mc->CopysetIDInfoIn(100, FLAGS_logic_pool_id, 999)); @@ -596,7 +596,7 @@ TEST_F(TestLibcurveInterface, InterfaceExceptionTest) { // open not create file ASSERT_EQ(-1 * LIBCURVE_ERROR::FAILED, Open(filename.c_str(), &userinfo)); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9106, &ep); PeerId pd(ep); @@ -684,7 +684,7 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { fileinstance_.Initialize( "/UnstableChunkserverTest", mdsclient_, userinfo, OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -722,7 +722,7 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // LeaderMayChange of every copyset that has been written through the metacache stays in the normal state ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -740,19 +740,19 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { mds.EnableNetUnstable(10000); - // 写2次,读2次,每次请求重试3次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // unstable阈值为10,所以第11次请求返回时,对应的chunkserver被标记为unstable - // leader在对应chunkserver上的copyset会设置leaderMayChange为true - // 下次发起请求时,会先去刷新leader信息, - // 由于leader没有发生改变,而且延迟仍然存在 - // 所以第12次请求仍然超时,leaderMayChange仍然为true + // Write twice and read twice, retrying three times per request + // Because of the delay set on the chunkserver side, each request times out + // The unstable threshold is 10, so when the 11th request returns, the corresponding chunkserver is marked as unstable + // Copysets whose leader is on that chunkserver set leaderMayChange to true + // The next time a request is issued, the leader information is refreshed first, + // but since the leader has not changed and the delay still exists, + // the 12th request still times out and leaderMayChange remains true ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Read(buffer, 1 * chunk_size, length)); - // 获取第2个chunk的chunkid信息 + // Obtain chunkid information for the second chunk ChunkIDInfo_t chunkinfo2; rc = mc->GetChunkInfoByIndex(1, &chunkinfo2); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -769,9 +769,9 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { } } - // 当copyset处于unstable状态时 - // 不进入超时时间指数退避逻辑,rpc超时时间设置为默认值 - // 所以每个请求总时间为3s,4个请求需要12s + // When the copyset is in an unstable state, + // the timeout does not enter the exponential backoff logic and the rpc timeout is set to the default value, + // so the total time for each request is 3 seconds, and 4 requests require 
12 seconds auto start = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); @@ -783,9 +783,9 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { mds.DisableNetUnstable(); - // 取消延迟,再次读写第2个chunk - // 获取leader信息后,会将leaderMayChange置为false - // 第一个chunk对应的copyset依赖leaderMayChange为true + // Cancel the delay and read and write the second chunk again + // After the leader information is obtained, leaderMayChange will be set to false + // leaderMayChange of the copyset corresponding to the first chunk remains true ASSERT_EQ(8192, fileinstance_.Write(buffer, 1 * chunk_size, length)); ASSERT_EQ(8192, fileinstance_.Read(buffer, 1 * chunk_size, length)); for (int i = 0; i < FLAGS_copyset_num; ++i) { @@ -809,7 +809,7 @@ TEST_F(TestLibcurveInterface, UnstableChunkserverTest) { PeerId pd2(ep2); cliservice->SetPeerID(pd2); - // 设置rcp返回失败,迫使copyset切换leader, 切换leader后读写成功 + // Make the rpc return failure to force the copyset to switch leader; after the leader switch, reads and writes succeed chunkservice[0]->SetRPCFailed(); ASSERT_EQ(8192, fileinstance_.Write(buffer, 0 * chunk_size, length)); @@ -872,7 +872,7 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { fileinstance_.Initialize("/ResumeTimeoutBackoff", mdsclient_, userinfo, OpenFlags{}, fopt); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9151, &ep); PeerId pd(ep); @@ -909,7 +909,7 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { ASSERT_EQ(length, fileinstance_.Write(buffer, offset, length)); ASSERT_EQ(length, fileinstance_.Read(buffer, offset, length)); - // metacache中被写过的copyset leadermaychange都处于正常状态 + // LeaderMayChange of every copyset that has been written through the metacache stays in the normal state ChunkIDInfo_t chunkinfo1; MetaCacheErrorType rc = mc->GetChunkInfoByIndex(0, &chunkinfo1); ASSERT_EQ(rc, MetaCacheErrorType::OK); @@ -927,17 +927,17 @@ TEST_F(TestLibcurveInterface, ResumeTimeoutBackoff) { mds.EnableNetUnstable(10000); - // 写2次, 每次请求重试11次 - // 因为在chunkserver端设置了延迟,导致每次请求都会超时 - // 第一个请求重试11次,会把chunkserver标记为unstable + // Write twice, retrying 11 times per request + // Because of the delay set on the chunkserver side, each request times out + // The first request is retried 11 times, which marks the chunkserver as unstable ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); - // 第二个写请求,由于其对应的copyset leader may change - // 第1次请求超时时间为1s - // 后面4次重试由于leader may change所以超时时间也是1s - // 第5-11次请求由于重试次数超过minRetryTimesForceTimeoutBackoff - // 所以超时时间都进入指数退避,为8s * 6 = 48s - // 所以第二次写请求,总共耗时53s,并写入失败 + // For the second write request, since its copyset leader may change, + // the first attempt has a timeout of 1 second + // and the next 4 retries also use a 1-second timeout because the leader may change + // From the 5th to the 11th attempt the retry count exceeds minRetryTimesForceTimeoutBackoff, + // so those timeouts enter exponential backoff, amounting to 8s * 6 = 48s + // So the second write request takes 53 seconds in total, and the write fails auto start = TimeUtility::GetTimeofDayMs(); ASSERT_EQ(-2, fileinstance_.Write(buffer, 1 * chunk_size, length)); auto elapsedMs = TimeUtility::GetTimeofDayMs() - start; @@ -961,7 +961,7 @@ TEST_F(TestLibcurveInterface, InterfaceStripeTest) { uint64_t size = 100 * 1024 * 1024 * 1024ul; FileClient fc; - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9115, &ep); PeerId pd(ep); diff --git 
a/test/client/mds_failover_test.cpp b/test/client/mds_failover_test.cpp index e95912f610..750afa7993 100644 --- a/test/client/mds_failover_test.cpp +++ b/test/client/mds_failover_test.cpp @@ -50,7 +50,7 @@ namespace curve { namespace client { -// 测试mds failover切换状态机 +// Test the mds failover switching state machine TEST(MDSChangeTest, MDSFailoverTest) { RPCExcutorRetryPolicy rpcexcutor; @@ -70,10 +70,10 @@ TEST(MDSChangeTest, MDSFailoverTest) { int mds1RetryTimes = 0; int mds2RetryTimes = 0; - // 场景1: mds0、1、2, currentworkindex = 0, mds0, mds1, mds2都宕机, - // 发到其rpc都以EHOSTDOWN返回,导致上层client会一直切换mds重试 - // 按照0-->1-->2持续进行 - // 每次rpc返回-EHOSTDOWN,会直接触发RPC切换。最终currentworkindex没有切换 + // Scenario 1: mds0, 1, 2, currentworkindex = 0; mds0, mds1, and mds2 are all down, + // and every RPC sent to them returns EHOSTDOWN, so the upper-layer client keeps switching mds and retrying, + // cycling through 0-->1-->2 + // Each rpc that returns -EHOSTDOWN directly triggers an RPC switch; in the end currentworkindex is not switched auto task1 = [&](int mdsindex, uint64_t rpctimeoutMS, brpc::Channel* channel, brpc::Controller* cntl)->int { if (mdsindex == 0) { @@ -91,12 +91,12 @@ TEST(MDSChangeTest, MDSFailoverTest) { }; uint64_t startMS = TimeUtility::GetTimeofDayMs(); - // 控制面接口调用, 1000为本次rpc的重试总时间 + // Control plane interface call; 1000 is the total retry time of this RPC rpcexcutor.DoRPCTask(task1, 1000); uint64_t endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GT(endMS - startMS, 1000 - 1); - // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量 + // This retry is a round-robin retry, so the retry counts of the mds nodes should be close, differing by no more than the total number of mds ASSERT_LT(abs(mds0RetryTimes - mds1RetryTimes), 3); ASSERT_LT(abs(mds2RetryTimes - mds1RetryTimes), 3); @@ -106,10 +106,10 @@ TEST(MDSChangeTest, MDSFailoverTest) { ASSERT_GT(endMS - startMS, 3000 - 1); ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - // 场景2:mds0、1、2, currentworkindex = 0, mds0宕机,并且这时候将正在工作的 - // mds索引切换到index2,预期client在index=0重试之后会直接切换到index 2 - // mds2这这时候直接返回OK,rpc停止重试。 - // 预期client总共发送两次rpc,一次发送到mds0,另一次发送到mds2,跳过中间的 + // Scenario 2: mds0, 1, 2, currentworkindex = 0; mds0 goes down, and at this point the working + // mds index is switched to index 2; the client is expected to switch directly to index 2 after retrying on index = 0 + // At this point, mds2 returns OK directly and the rpc stops retrying. + // The client is expected to send two rpcs in total, one to mds0 and the other to mds2, skipping the intermediate // mds1。 mds0RetryTimes = 0; mds1RetryTimes = 0; @@ -129,7 +129,7 @@ TEST(MDSChangeTest, MDSFailoverTest) { if (mdsindex == 2) { mds2RetryTimes++; - // 本次返回ok,那么RPC应该成功了,不会再重试 + // OK is returned this time, so the RPC has succeeded and no further retries are made return LIBCURVE_ERROR::OK; } @@ -144,10 +144,10 @@ TEST(MDSChangeTest, MDSFailoverTest) { ASSERT_EQ(mds1RetryTimes, 0); ASSERT_EQ(mds2RetryTimes, 1); - // 场景3:mds0、1、2,currentworkindex = 1,且mds1宕机了, - // 这时候会切换到mds0和mds2 - // 在切换到2之后,mds1又恢复了,这时候切换到mds1,然后rpc发送成功。 - // 这时候的切换顺序为1->2->0, 1->2->0, 1。 + // Scenario 3: mds0, 1, 2, currentworkindex = 1, and mds1 is down, + // so at this point the client switches to mds0 and mds2 + // After switching to 2, mds1 recovers, the client switches back to mds1, and the rpc is then sent successfully. + // The switching order at this point is 1->2->0, 1->2->0, 1. 
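// (Illustrative sketch, not part of the patch.) The round-robin retry policy the scenarios above
// describe: on -EHOSTDOWN the executor immediately rotates to the next mds, and only a successful
// call pins currentWorkIndex. The names below are assumptions for illustration only, not the real
// RPCExcutorRetryPolicy implementation.
#include <functional>
#include <vector>

inline int DoRoundRobinRetrySketch(const std::vector<std::function<int(int)>>& mdsCalls,
                                   int* currentWorkIndex, int maxAttempts) {
    int index = *currentWorkIndex;
    for (int attempt = 0; attempt < maxAttempts; ++attempt) {
        int ret = mdsCalls[index](index);
        if (ret >= 0) {
            *currentWorkIndex = index;  // success: remember the mds that worked
            return ret;
        }
        // -EHOSTDOWN (or a timeout) switches to the next mds: 0 -> 1 -> 2 -> 0 -> ...
        index = (index + 1) % static_cast<int>(mdsCalls.size());
    }
    return -1;  // all attempts exhausted; currentWorkIndex stays unchanged
}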
mds0RetryTimes = 0; mds1RetryTimes = 0; mds2RetryTimes = 0; @@ -161,7 +161,7 @@ TEST(MDSChangeTest, MDSFailoverTest) { if (mdsindex == 1) { mds1RetryTimes++; - // 当在mds1上重试到第三次的时候向上返回成功,停止重试 + // On the third retry on mds1, success is returned to the caller and retrying stops if (mds1RetryTimes == 3) { return LIBCURVE_ERROR::OK; } @@ -186,11 +186,11 @@ TEST(MDSChangeTest, MDSFailoverTest) { ASSERT_EQ(1, rpcexcutor.GetCurrentWorkIndex()); - // 场景4:mds0、1、2, currentWorkindex = 0, 但是发往mds1的rpc请求一直超时 - // 最后rpc返回结果是超时. - // 对于超时的mds节点会连续重试mds.maxFailedTimesBeforeChangeMDS后切换 - // 当前mds.maxFailedTimesBeforeChangeMDS=2。 - // 所以重试逻辑应该是:0->0->1->2, 0->0->1->2, 0->0->1->2, ... + // Scenario 4: mds0, 1, 2, currentWorkindex = 0, but the rpc requests to mds1 keep timing out + // and the final rpc result is a timeout + // A timed-out mds node is retried mds.maxFailedTimesBeforeChangeMDS times in a row before switching + // Currently mds.maxFailedTimesBeforeChangeMDS = 2. + // So the retry sequence should be: 0->0->1->2, 0->0->1->2, 0->0->1->2, ... LOG(INFO) << "case 4"; mds0RetryTimes = 0; mds1RetryTimes = 0; @@ -222,12 +222,12 @@ TEST(MDSChangeTest, MDSFailoverTest) { endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GT(endMS - startMS, 3000 - 1); ASSERT_EQ(0, rpcexcutor.GetCurrentWorkIndex()); - // 本次重试为轮询重试,每个mds的重试次数应该接近,不超过总的mds数量 + // This retry is a round-robin retry, so the retry counts of the mds nodes should be close, differing by no more than the total number of mds ASSERT_GT(mds0RetryTimes, mds1RetryTimes + mds2RetryTimes); - // 场景5:mds0、1、2,currentWorkIndex = 0 - // 但是rpc请求前10次全部返回EHOSTDOWN - // mds重试睡眠10ms,所以总共耗时100ms时间 + // Scenario 5: mds0, 1, 2, currentWorkIndex = 0 + // but the first 10 rpc requests all return EHOSTDOWN + // Each mds retry sleeps for 10ms, so this takes 100ms in total rpcexcutor.SetCurrentWorkIndex(0); int hostDownTimes = 10; auto task5 = [&](int mdsindex, uint64_t rpctimeoutMs, @@ -241,11 +241,11 @@ TEST(MDSChangeTest, MDSFailoverTest) { return 0; }; startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task5, 10000); // 总重试时间10s + rpcexcutor.DoRPCTask(task5, 10000); // Total retry time 10s endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GE(endMS - startMS, 100); - // 场景6: mds在重试过程中一直返回EHOSTDOWN,总共重试5s + // Scenario 6: mds keeps returning EHOSTDOWN during the retry process; the total retry time is 5s rpcexcutor.SetCurrentWorkIndex(0); int calledTimes = 0; auto task6 = [&](int mdsindex, uint64_t rpctimeoutMs, @@ -256,12 +256,12 @@ TEST(MDSChangeTest, MDSFailoverTest) { }; startMS = TimeUtility::GetTimeofDayMs(); - rpcexcutor.DoRPCTask(task6, 5 * 1000); // 总重试时间5s + rpcexcutor.DoRPCTask(task6, 5 * 1000); // Total retry time 5s endMS = TimeUtility::GetTimeofDayMs(); ASSERT_GE(endMS - startMS, 5 * 1000 - 1); - // 每次hostdown情况下,睡眠10ms,总重试时间5s,所以总共重试次数小于等于500次 - // 为了尽量减少误判,所以加入10次冗余 + // On each hostdown the retry sleeps for 10ms and the total retry time is 5s, so the total number of retries is at most 500 + // To minimize false positives, a margin of 10 extra calls is allowed LOG(INFO) << "called times " << calledTimes; ASSERT_LE(calledTimes, 510); } diff --git a/test/client/mock/mock_chunkservice.h b/test/client/mock/mock_chunkservice.h index 3891ce60bf..17b1cb0e2a 100644 --- a/test/client/mock/mock_chunkservice.h +++ b/test/client/mock/mock_chunkservice.h @@ -42,7 +42,7 @@ using ::testing::Invoke; using curve::chunkserver::ChunkService; using curve::chunkserver::CHUNK_OP_STATUS; -/* 当前仅仅模拟单 chunk read/write */ +/* Currently, only single 
chunk read/write is simulated */ class FakeChunkServiceImpl : public ChunkService { public: virtual ~FakeChunkServiceImpl() {} @@ -132,8 +132,8 @@ class FakeChunkServiceImpl : public ChunkService { private: std::set chunkIds_; - /* 由于 bthread 栈空间的限制,这里不会开很大的空间,如果测试需要更大的空间 - * 请在堆上申请 */ + /* Due to the limitation of the bthread stack space, no large buffer is allocated here. If a test needs more space, + * please allocate it on the heap */ char chunk_[4096] = {0}; }; diff --git a/test/client/request_scheduler_test.cpp b/test/client/request_scheduler_test.cpp index 9ff0636530..3f700dd8b7 100644 --- a/test/client/request_scheduler_test.cpp +++ b/test/client/request_scheduler_test.cpp @@ -258,7 +258,7 @@ TEST(RequestSchedulerTest, fake_server_test) { } // read snapshot - // 1. 先 write snapshot + // 1. Write snapshot first { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::WRITE; @@ -282,7 +282,7 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, requestScheduler.ScheduleRequest(reqCtxs)); cond.Wait(); } - // 2. 再 read snapshot 验证一遍 + // 2. Verify with read snapshot again { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::READ_SNAP; @@ -309,7 +309,7 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(reqCtx->readData_, expectReadData); ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 3. 在 delete snapshot + // 3. Then delete the snapshot { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -332,7 +332,7 @@ TEST(RequestSchedulerTest, fake_server_test) { cond.Wait(); ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 4. 重复 delete snapshot + // 4. Repeat delete snapshot { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::DELETE_SNAP; @@ -357,7 +357,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试 get chunk info + // Test get chunk info { ChunkInfoDetail chunkInfo; RequestContext *reqCtx = new FakeRequestContext(); @@ -383,7 +383,7 @@ TEST(RequestSchedulerTest, fake_server_test) { reqDone->GetErrorCode()); } - // 测试createClonechunk + // Test createClonechunk { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::CREATE_CLONE; @@ -407,7 +407,7 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(0, reqDone->GetErrorCode()); } - // 测试recoverChunk + // Test recoverChunk { RequestContext *reqCtx = new FakeRequestContext(); reqCtx->optype_ = OpType::RECOVER_CHUNK; @@ -505,7 +505,7 @@ TEST(RequestSchedulerTest, fake_server_test) { ASSERT_EQ(-1, reqDone->GetErrorCode()); } - /* 2. 并发测试 */ + /* 2. 
Concurrent testing */ curve::common::CountDownEvent cond(4 * kMaxLoop); auto func = [&]() { for (int i = 0; i < kMaxLoop; ++i) { @@ -578,11 +578,11 @@ TEST(RequestSchedulerTest, CommonTest) { MetaCache metaCache; FileMetric fm("test"); - // scheduleQueueCapacity 设置为 0 + // scheduleQueueCapacity set to 0 opt.scheduleQueueCapacity = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); - // threadpoolsize 设置为 0 + // threadpoolsize set to 0 opt.scheduleQueueCapacity = 4096; opt.scheduleThreadpoolSize = 0; ASSERT_EQ(-1, sche.Init(opt, &metaCache, &fm)); diff --git a/test/client/request_sender_test.cpp b/test/client/request_sender_test.cpp index 92882bac79..8d515d6998 100644 --- a/test/client/request_sender_test.cpp +++ b/test/client/request_sender_test.cpp @@ -96,7 +96,7 @@ class RequestSenderTest : public ::testing::Test { }; TEST_F(RequestSenderTest, BasicTest) { - // 非法的 port + // Illegal port std::string leaderStr = "127.0.0.1:65539"; butil::EndPoint leaderAddr; ChunkServerID leaderId = 1; diff --git a/test/common/bitmap_test.cpp b/test/common/bitmap_test.cpp index 8bb85b01ad..e6acd37ff6 100644 --- a/test/common/bitmap_test.cpp +++ b/test/common/bitmap_test.cpp @@ -62,7 +62,7 @@ TEST(BitmapTEST, constructor_test) { delete[] mem; } - // 测试拷贝构造 + // Test copy construction { Bitmap bitmap1(32); Bitmap bitmap2(bitmap1); @@ -72,7 +72,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试赋值操作 + // Test assignment operation { Bitmap bitmap1(32); Bitmap bitmap2(16); @@ -88,7 +88,7 @@ TEST(BitmapTEST, constructor_test) { } } - // 测试比较操作符 + // Test Comparison Operator { Bitmap bitmap1(16); Bitmap bitmap2(16); @@ -229,7 +229,7 @@ TEST(BitmapTEST, divide_test) { vector clearRanges; vector setRanges; - // 所有位为0 + // All bits are 0 { bitmap.Clear(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -241,7 +241,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 所有位为1 + // All bits are 1 { bitmap.Set(); bitmap.Divide(0, 31, &clearRanges, &setRanges); @@ -253,7 +253,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为clear range,末尾为set range + // Two ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 16); bitmap.Set(17, 31); @@ -268,7 +268,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 两个range,起始为 set range,末尾为 clear range + // Two ranges, starting with set range and ending with clear range { bitmap.Set(0, 16); bitmap.Clear(17, 31); @@ -283,7 +283,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 set range,中间为 clear range + // Three ranges, with set ranges at the beginning and end, and clear ranges in the middle { bitmap.Set(0, 8); bitmap.Clear(9, 25); @@ -301,7 +301,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 三个range,头尾为 clear range,中间为 set range + // Three ranges, with clear ranges at the beginning and end, and set ranges in the middle { bitmap.Clear(0, 8); bitmap.Set(9, 25); @@ -319,7 +319,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 clear range,末尾为 set range + // Four ranges, starting with a clear range and ending with a set range { bitmap.Clear(0, 7); bitmap.Set(8, 15); @@ -340,7 +340,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 四个range,头为 set range,末尾为 clear range + // Four ranges, starting with set range and ending with clear range { bitmap.Set(0, 7); bitmap.Clear(8, 15); @@ -361,7 +361,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); } - // 复杂场景随机偏移测试 + // Random offset testing for complex scenes { bitmap.Set(0, 5); 
bitmap.Clear(6, 9); diff --git a/test/common/channel_pool_test.cpp b/test/common/channel_pool_test.cpp index e327f6f82f..e021acc4b9 100644 --- a/test/common/channel_pool_test.cpp +++ b/test/common/channel_pool_test.cpp @@ -30,20 +30,20 @@ namespace common { TEST(Common, ChannelPool) { ChannelPool channelPool; ChannelPtr channelPtr; - // 地址非法,init失败 + // Illegal address, init failed std::string addr = "127.0.0.1:80000"; ASSERT_EQ(-1, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_FALSE(channelPtr); - // 地址合法,init成功 + // The address is legal, init succeeded addr = "127.0.0.1:8000"; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_TRUE(channelPtr); - // 同一个地址应该返回同一个channelPtr + // The same address should return the same channelPtr ChannelPtr channelPtr2; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr2)); ASSERT_TRUE(channelPtr2); ASSERT_EQ(channelPtr, channelPtr2); - // 清空 + // Clear channelPool.Clear(); } diff --git a/test/common/configuration_test.cpp b/test/common/configuration_test.cpp index 9dc770bcc8..de02cda6c0 100644 --- a/test/common/configuration_test.cpp +++ b/test/common/configuration_test.cpp @@ -129,52 +129,52 @@ TEST_F(ConfigurationTest, ListConfig) { std::map configs; configs = conf.ListConfig(); ASSERT_NE(0, configs.size()); - // 抽几个key来校验以下 + // Pick a few keys for validation. ASSERT_EQ(configs["test.int1"], "12345"); ASSERT_EQ(configs["test.bool1"], "0"); - // 如果key不存在,返回为空 + // If the key does not exist, return empty ASSERT_EQ(configs["xxx"], ""); } -// 覆盖原有配置 +// Overwrite the original configuration TEST_F(ConfigurationTest, SaveConfig) { bool ret; Configuration conf; conf.SetConfigPath(confFile_); - // 自定义配置项并保存 + // Customize configuration items and save them conf.SetStringValue("test.str1", "new"); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 重新加载配置项 + // Reload Configuration Items Configuration conf2; conf2.SetConfigPath(confFile_); ret = conf2.LoadConfig(); ASSERT_EQ(ret, true); - // 可以读取自定义配置项,原有配置项被覆盖,读取不到 + // Custom configuration items can be read, but the original configuration items are overwritten and cannot be read ASSERT_EQ(conf2.GetValue("test.str1"), "new"); ASSERT_EQ(conf2.GetValue("test.int1"), ""); } -// 读取当前配置写到其他路径 +// Read the current configuration and write to another path TEST_F(ConfigurationTest, SaveConfigToFileNotExist) { bool ret; - // 加载当前配置 + // Load current configuration Configuration conf; conf.SetConfigPath(confFile_); ret = conf.LoadConfig(); ASSERT_EQ(ret, true); - // 写配置到其他位置 + // Write configuration to another location std::string newFile("curve.conf.test2"); conf.SetConfigPath(newFile); ret = conf.SaveConfig(); ASSERT_EQ(ret, true); - // 从新配置文件加载,并读取某项配置来进行校验 + // Load from a new configuration file and read a certain configuration for verification Configuration newConf; newConf.SetConfigPath(newFile); ret = newConf.LoadConfig(); @@ -337,11 +337,11 @@ TEST_F(ConfigurationTest, TestMetric) { "{\"conf_name\":\"key1\",\"conf_value\":\"123\"}"); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key2").c_str(), "{\"conf_name\":\"key2\",\"conf_value\":\"1.230000\"}"); - // 还未设置时,返回空 + // When not yet set, return empty ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key3").c_str(), ""); - // 支持自动更新metric + // Support for automatic updating of metrics conf.SetIntValue("key1", 234); ASSERT_STREQ(bvar::Variable::describe_exposed("conf_metric_key1").c_str(), "{\"conf_name\":\"key1\",\"conf_value\":\"234\"}"); diff --git a/test/common/count_down_event_test.cpp 
b/test/common/count_down_event_test.cpp index 8bdc5c9681..d9e114d5fe 100644 --- a/test/common/count_down_event_test.cpp +++ b/test/common/count_down_event_test.cpp @@ -90,7 +90,7 @@ TEST(CountDownEventTest, basic) { } - /* 1. initCnt==Signal次数 */ + /* 1. InitCnt==Signal count */ { std::atomic signalCount; signalCount.store(0, std::memory_order_release); @@ -111,7 +111,7 @@ TEST(CountDownEventTest, basic) { t1.join(); } - /* 2. initCnt signalCount; signalCount.store(0, std::memory_order_release); @@ -128,7 +128,7 @@ TEST(CountDownEventTest, basic) { std::thread t1(func); - /* 等到Signal次数>initCnt */ + /* Wait until SignalCount>initCnt */ while (true) { ::usleep(5); if (signalCount.load(std::memory_order_acquire) > kInitCnt) { @@ -141,13 +141,13 @@ TEST(CountDownEventTest, basic) { t1.join(); } - /* 3. initCnt>Signal次数 */ + /* 3. initCnt>SignalCount */ { std::atomic signalCount; signalCount.store(0, std::memory_order_release); const int kEventNum = 10; - /* kSignalEvent1 + kSignalEvent2等于kEventNum */ + /* kSignalEvent1 + kSignalEvent2 = kEventNum */ const int kSignalEvent1 = kEventNum - 5; const int kSignalEvent2 = 5; CountDownEvent cond(kEventNum); @@ -167,7 +167,7 @@ TEST(CountDownEventTest, basic) { }; std::thread waitThread(waitFunc); - /* 由于t1 唤醒的次数不够,所以waitThread会阻塞在wait那里 */ + /* Due to insufficient wake-up times for t1, waitThread will block at the wait location */ ASSERT_EQ(false, passWait.load(std::memory_order_acquire)); auto func2 = [&] { @@ -176,7 +176,7 @@ TEST(CountDownEventTest, basic) { cond.Signal(); } }; - /* 运行t2,补上不够的唤醒次数 */ + /* Run t2 to make up for insufficient wake-up times */ std::thread t2(func2); t1.join(); @@ -203,7 +203,7 @@ TEST(CountDownEventTest, basic) { std::chrono::duration elpased = end - start; std::cerr << "elapsed: " << elpased.count() << std::endl; - // 事件未到达,超时返回,可以容许在一定的误差 + // The event did not arrive and returned after a timeout, allowing for a certain error ASSERT_GT(static_cast(elpased.count()), waitForMs-1000); t1.join(); @@ -226,7 +226,7 @@ TEST(CountDownEventTest, basic) { std::chrono::duration elpased = end - start; std::cerr << "elapsed: " << elpased.count() << std::endl; - // 事件达到,提前返回 + // Event reached, return early ASSERT_GT(waitForMs, static_cast(elpased.count())); t1.join(); diff --git a/test/common/lru_cache_test.cpp b/test/common/lru_cache_test.cpp index a5e9d65e19..c147322f37 100644 --- a/test/common/lru_cache_test.cpp +++ b/test/common/lru_cache_test.cpp @@ -33,26 +33,26 @@ namespace common { TEST(TestCacheMetrics, testall) { CacheMetrics cacheMetrics("LRUCache"); - // 1. 新增数据项 + // 1. Add Data Item cacheMetrics.UpdateAddToCacheCount(); ASSERT_EQ(1, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateAddToCacheBytes(1000); ASSERT_EQ(1000, cacheMetrics.cacheBytes.get_value()); - // 2. 移除数据项 + // 2. Remove Data Item cacheMetrics.UpdateRemoveFromCacheCount(); ASSERT_EQ(0, cacheMetrics.cacheCount.get_value()); cacheMetrics.UpdateRemoveFromCacheBytes(200); ASSERT_EQ(800, cacheMetrics.cacheBytes.get_value()); - // 3. cache命中 + // 3. cache hit ASSERT_EQ(0, cacheMetrics.cacheHit.get_value()); cacheMetrics.OnCacheHit(); ASSERT_EQ(1, cacheMetrics.cacheHit.get_value()); - // 4. cache未命中 + // 4. cache Misses ASSERT_EQ(0, cacheMetrics.cacheMiss.get_value()); cacheMetrics.OnCacheMiss(); ASSERT_EQ(1, cacheMetrics.cacheMiss.get_value()); @@ -63,7 +63,7 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { auto cache = std::make_shared>(maxCount, std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. 
Test put/get uint64_t cacheSize = 0; for (int i = 1; i <= maxCount + 1; i++) { std::string eliminated; @@ -83,7 +83,7 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 第一个元素被剔出 + // 2. The first element is removed std::string res; ASSERT_FALSE(cache->Get(std::to_string(1), &res)); for (int i = 2; i <= maxCount + 1; i++) { @@ -91,17 +91,17 @@ TEST(CaCheTest, test_cache_with_capacity_limit) { ASSERT_EQ(std::to_string(i), res); } - // 3. 测试删除元素 - // 删除不存在的元素 + // 3. Test Delete Element + // Delete non-existent elements cache->Remove("1"); - // 删除list中存在的元素 + // Delete elements present in the list cache->Remove("2"); ASSERT_FALSE(cache->Get("2", &res)); cacheSize -= std::to_string(2).size() * 2; ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); ASSERT_EQ(cacheSize, cache->GetCacheMetrics()->cacheBytes.get_value()); - // 4. 重复put + // 4. Repeat put std::string eliminated; cache->Put("4", "hello", &eliminated); ASSERT_TRUE(cache->Get("4", &res)); @@ -116,7 +116,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { auto cache = std::make_shared>( std::make_shared("LruCache")); - // 1. 测试 put/get + // 1. Test put/get std::string res; for (int i = 1; i <= 10; i++) { std::string eliminated; @@ -125,7 +125,7 @@ TEST(CaCheTest, test_cache_with_capacity_no_limit) { ASSERT_EQ(std::to_string(i), res); } - // 2. 测试元素删除 + // 2. Test element deletion cache->Remove("1"); ASSERT_FALSE(cache->Get("1", &res)); } @@ -231,7 +231,7 @@ TEST(SglCaCheTest, test_cache_with_capacity_limit) { auto cache = std::make_shared>(maxCount, std::make_shared("LruCache")); - // 1. 测试 put/IsCached + // 1. Test put/IsCached uint64_t cacheSize = 0; for (int i = 1; i <= maxCount; i++) { cache->Put(std::to_string(i)); @@ -240,19 +240,19 @@ TEST(SglCaCheTest, test_cache_with_capacity_limit) { ASSERT_TRUE(cache->IsCached(std::to_string(i))); } - // 2. 第一个元素被剔出 + // 2. The first element is removed cache->Put(std::to_string(11)); ASSERT_FALSE(cache->IsCached(std::to_string(1))); - // 3. 测试删除元素 - // 删除不存在的元素 + // 3. Test Delete Element + // Delete non-existent elements cache->Remove("1"); - // 删除list中存在的元素 + // Delete elements present in the list cache->Remove("2"); ASSERT_FALSE(cache->IsCached("2")); ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); - // 4. 重复put + // 4. Repeat put cache->Put("4"); ASSERT_TRUE(cache->IsCached("4")); ASSERT_EQ(maxCount - 1, cache->GetCacheMetrics()->cacheCount.get_value()); @@ -262,7 +262,7 @@ TEST(SglCaCheTest, test_cache_with_capacity_no_limit) { auto cache = std::make_shared>( std::make_shared("LruCache")); - // 1. 测试 put/IsCached + // 1. Test put/IsCached std::string res; for (int i = 1; i <= 10; i++) { std::string eliminated; @@ -271,7 +271,7 @@ TEST(SglCaCheTest, test_cache_with_capacity_no_limit) { ASSERT_FALSE(cache->IsCached(std::to_string(100))); } - // 2. 测试元素删除 + // 2. 
Test element deletion cache->Remove("1"); ASSERT_FALSE(cache->IsCached("1")); } diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp index cb44a36b09..584aa0b738 100644 --- a/test/common/task_thread_pool_test.cpp +++ b/test/common/task_thread_pool_test.cpp @@ -47,7 +47,7 @@ int TestAdd2(int a, double b, CountDownEvent *cond) { } TEST(TaskThreadPool, basic) { - /* 测试线程池 start 入参 */ + /*Test thread pool start input parameter*/ { TaskThreadPool<> taskThreadPool; ASSERT_EQ(-1, taskThreadPool.Start(2, 0)); @@ -74,7 +74,7 @@ TEST(TaskThreadPool, basic) { } { - /* 测试不设置,此时为 INT_MAX */ + /*Test not set, at this time it is INT_ MAX*/ TaskThreadPool<> taskThreadPool; ASSERT_EQ(0, taskThreadPool.Start(4)); ASSERT_EQ(INT_MAX, taskThreadPool.QueueCapacity()); @@ -92,7 +92,7 @@ TEST(TaskThreadPool, basic) { CountDownEvent cond1(1); taskThreadPool.Enqueue(TestAdd1, 1, 1.234, &cond1); cond1.Wait(); - /* TestAdd2 是有返回值的 function */ + /*TestAdd2 is a function with a return value*/ CountDownEvent cond2(1); taskThreadPool.Enqueue(TestAdd2, 1, 1.234, &cond2); cond2.Wait(); @@ -100,7 +100,7 @@ TEST(TaskThreadPool, basic) { taskThreadPool.Stop(); } - /* 基本运行 task 测试 */ + /*Basic task testing*/ { std::atomic runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -133,14 +133,14 @@ TEST(TaskThreadPool, basic) { t2.join(); t3.join(); - /* 等待所有 task 执行完毕 */ + /*Wait for all tasks to complete execution*/ cond.Wait(); ASSERT_EQ(3 * kMaxLoop, runTaskCount.load(std::memory_order_acquire)); taskThreadPool.Stop(); } - /* 测试队列满了,push会阻塞 */ + /*The test queue is full, push will block*/ { std::atomic runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -169,12 +169,12 @@ TEST(TaskThreadPool, basic) { ASSERT_EQ(kQueueCapacity, taskThreadPool.QueueCapacity()); ASSERT_EQ(kThreadNums, taskThreadPool.ThreadOfNums()); - /* 把线程池的所有处理线程都卡住了 */ + /*Stuck all processing threads in the thread pool*/ taskThreadPool.Enqueue(waitTask, &startRunCond1, &cond1); taskThreadPool.Enqueue(waitTask, &startRunCond2, &cond2); taskThreadPool.Enqueue(waitTask, &startRunCond3, &cond3); taskThreadPool.Enqueue(waitTask, &startRunCond4, &cond4); - /* 等待 waitTask1、waitTask2、waitTask3、waitTask4 都开始运行 */ + /*Wait for waitTask1, waitTask2, waitTask3, and waitTask4 to start running*/ startRunCond1.Wait(); startRunCond2.Wait(); startRunCond3.Wait(); @@ -186,7 +186,7 @@ TEST(TaskThreadPool, basic) { runTaskCount.fetch_add(1, std::memory_order_acq_rel); }; - /* 记录线程 push 到线程池 queue 的 task 数量 */ + /*Record the number of tasks from thread push to thread pool queue*/ std::atomic pushTaskCount1; std::atomic pushTaskCount2; std::atomic pushTaskCount3; @@ -208,7 +208,7 @@ TEST(TaskThreadPool, basic) { std::thread t2(std::bind(threadFunc, &pushTaskCount2)); std::thread t3(std::bind(threadFunc, &pushTaskCount3)); - /* 等待线程池 queue 被 push 满 */ + /*Waiting for thread pool queue to be pushed full*/ int pushTaskCount; while (true) { ::usleep(50); @@ -222,22 +222,22 @@ TEST(TaskThreadPool, basic) { } } - /* push 进去的 task 都没有被执行 */ + /*The tasks that were pushed in were not executed*/ ASSERT_EQ(0, runTaskCount.load(std::memory_order_acquire)); /** - * 此时,thread pool 的 queue 肯定 push 满了,且 push - * 满了之后就没法再 push 了 + *At this point, the thread pool queue must be full of push, and the push + *After it's full, it can't push anymore */ ASSERT_EQ(pushTaskCount, taskThreadPool.QueueCapacity()); ASSERT_EQ(taskThreadPool.QueueCapacity(), taskThreadPool.QueueSize()); - /* 将线程池中的线程都唤醒 */ + /*Wake up all threads in the thread 
pool*/ cond1.Signal(); cond2.Signal(); cond3.Signal(); cond4.Signal(); - /* 等待所有 task 执行完成 */ + /*Wait for all task executions to complete*/ while (true) { ::usleep(10); if (runTaskCount.load(std::memory_order_acquire) @@ -247,7 +247,7 @@ TEST(TaskThreadPool, basic) { } /** - * 等待所有的 push thread 退出,这样才能保证 pushThreadCount 计数更新了 + *Wait for all push threads to exit so that the pushThreadCount count is updated */ pushThreadCond.Wait(); diff --git a/test/common/test_name_lock.cpp b/test/common/test_name_lock.cpp index e5520e0a1a..5ce31db1d1 100644 --- a/test/common/test_name_lock.cpp +++ b/test/common/test_name_lock.cpp @@ -31,29 +31,29 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // Same lock with different strs can lock without deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks with the same str can lock without deadlock lock2.Lock("str1"); - // 同锁同str TryLock失败 + // Same lock with the same str, TryLock fails ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // Same lock with a different str, TryLock succeeds ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // Different locks with the same str, TryLock succeeds ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlocking a name that is not locked is OK lock2.Unlock("str2"); } @@ -63,12 +63,12 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Successfully locked within the scope, cannot be locked again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocked outside the scope, can be locked again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); diff --git a/test/failpoint/failpoint_test.cpp b/test/failpoint/failpoint_test.cpp index f0096b0ea4..9966407e7f 100644 --- a/test/failpoint/failpoint_test.cpp +++ b/test/failpoint/failpoint_test.cpp @@ -25,11 +25,11 @@ #include "test/failpoint/fiu_local.h" /* - * libfiu 使用文档详见:https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html - * 分为2个部分,一部分是core API,包括fiu_do_on/fiu_return_on/fiu_init - * core API 用于作用与注入在业务代码处,并由外部control API控制触发。 - * control API 包括:fiu_enable\fiu_disable\fiu_enable_random等等 - * 用于在测试代码处用户进行错误的注入,具体使用方式和方法如下示例代码所示 + * For detailed documentation on how to use libfiu, please refer to: https://blitiri.com.ar/p/libfiu/doc/man-libfiu.html + * Libfiu is divided into two parts: the core API, which includes functions like fiu_do_on/fiu_return_on/fiu_init. + * The core API is used to inject faults into your business code and is controlled externally using the control API. + * The control API includes functions like fiu_enable, fiu_disable, fiu_enable_random, and more. + * These functions are used in your test code to inject errors. You can find specific usage examples and methods in the code snippets below. 
*/ namespace curve { @@ -45,20 +45,20 @@ class FailPointTest: public ::testing::Test { } }; -// 注入方式: 通过返回值的方式进行注入 +// Injection method: Inject by returning a value size_t free_space() { fiu_return_on("no_free_space", 0); return 100; } -// 注入方式: 通过side_effet 进行注入 +// Injection method: through side_effet injection void modify_state(int *val) { *val += 1; fiu_do_on("side_effect", *val += 1); return; } -// 注入方式: 通过side_effet 进行注入(lambda方式) +// Injection method: through side_effet injection (lambda method) void modify_state_with_lamda(int &val) { //NOLINT fiu_do_on("side_effect_2", auto func = [&] () { @@ -68,7 +68,7 @@ void modify_state_with_lamda(int &val) { //NOLINT return; } -// 错误触发方式: 总是触发 +// Error triggering method: always triggered TEST_F(FailPointTest, alwaysfail) { if (fiu_enable("no_free_space", 1, NULL, 0) == 0) { ASSERT_EQ(free_space(), 0); @@ -80,7 +80,7 @@ TEST_F(FailPointTest, alwaysfail) { ASSERT_EQ(free_space(), 100); } -// 错误触发方式: 随机触发错误 +// Error triggering method: Random error triggering TEST_F(FailPointTest, nondeterministic) { if (fiu_enable_random("no_free_space", 1, NULL, 0, 1) == 0) { ASSERT_EQ(free_space(), 0); diff --git a/test/fs/ext4_filesystem_test.cpp b/test/fs/ext4_filesystem_test.cpp index f2c6cfa520..19c8da52d2 100644 --- a/test/fs/ext4_filesystem_test.cpp +++ b/test/fs/ext4_filesystem_test.cpp @@ -79,7 +79,7 @@ TEST_F(Ext4LocalFileSystemTest, InitTest) { option.enableRenameat2 = true; struct utsname kernel_info; - // 测试版本偏低的情况 + // Testing with a lower version snprintf(kernel_info.release, sizeof(kernel_info.release), "%s", diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index bc922d19e2..1bc0999a93 100644 --- a/test/integration/chunkserver/chunkserver_basic_test.cpp +++ b/test/integration/chunkserver/chunkserver_basic_test.cpp @@ -106,7 +106,7 @@ class ChunkServerIoTest : public testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; params_.push_back(const_cast(chunkServerParams[0])); - // 初始化chunkfilepool,这里会预先分配一些chunk + // Initialize chunkfilepool, where some chunks will be pre allocated lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir_ = "./" + std::to_string(PeerCluster::PeerToId(peer1_)) + "/chunkfilepool/"; @@ -124,7 +124,7 @@ class ChunkServerIoTest : public testing::Test { ::system(rmdir1.c_str()); - // 等待进程结束 + // Waiting for the process to end ::usleep(100 * 1000); } @@ -138,7 +138,7 @@ class ChunkServerIoTest : public testing::Test { return -1; } - // 等待leader产生 + // Waiting for the leader to be generated if (cluster->WaitLeader(&leaderPeer_)) { LOG(ERROR) << "WaiteLeader failed"; return -1; @@ -171,13 +171,13 @@ class ChunkServerIoTest : public testing::Test { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - /* 场景一:新建的文件,Chunk文件不存在 */ + /* Scenario 1: Newly created file, Chunk file does not exist*/ ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, 0, length, nullptr)); ASSERT_EQ(0, verify->VerifyGetChunkInfo( chunkId, NULL_SN, NULL_SN, leader)); ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - /* 场景二:通过WriteChunk产生chunk文件后操作 */ + /* Scenario 2: After generating a chunk file through WriteChunk, perform the operation*/ data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, data.c_str(), &chunkData)); @@ -202,7 +202,7 @@ class ChunkServerIoTest : public testing::Test { ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); - 
/* 场景三:用户删除文件 */ + /* Scenario 3: User deletes the file */ ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); ASSERT_EQ(0, verify->VerifyGetChunkInfo( chunkId, NULL_SN, NULL_SN, leader)); @@ -216,148 +216,148 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn3 = 3; int length = kOpRequestAlignSize; std::string data(length * 4, 0); - std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 - std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 - std::string chunkData1c(kChunkSize, 0); // chunk1版本3预期数据 - std::string chunkData2(kChunkSize, 0); // chunk2预期数据 + std::string chunkData1a(kChunkSize, 0); // chunk1 version 1 expected data + std::string chunkData1b(kChunkSize, 0); // chunk1 version 2 expected data + std::string chunkData1c(kChunkSize, 0); // chunk1 version 3 expected data + std::string chunkData2(kChunkSize, 0); // chunk2 expected data std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - // 构造初始环境 - // 写chunk1产生chunk1,chunk1版本为1,chunk2开始不存在。 + // Construct initial environment + // Writing chunk1 creates chunk1 with version 1; chunk2 does not exist at the beginning. data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn1, 0, 12 * KB, data.c_str(), &chunkData1a)); /* - * 场景一:第一次给文件打快照 + * Scenario 1: Taking a snapshot of a file for the first time */ - chunkData1b.assign(chunkData1a); // 模拟对chunk1数据进行COW + chunkData1b.assign(chunkData1a); // Simulate COW on chunk1 data data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, data.c_str(), &chunkData1b)); - // 重复写入同一区域,用于验证不会重复cow + // Write to the same area repeatedly to verify that no duplicate COW occurs data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, data.c_str(), &chunkData1b)); - // 读取chunk1快照,预期读到版本1数据 + // Reading chunk1 snapshot, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, &chunkData1a)); - // chunk1写[0, 4KB] + // Chunk1 write [0, 4KB] data.assign(length, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 0, 4 * KB, data.c_str(), &chunkData1b)); - // chunk1写[4KB, 16KB] + // Chunk1 write [4KB, 16KB] data.assign(length, 'e'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 12 * KB, data.c_str(), &chunkData1b)); - // 获取chunk1信息,预期其版本为2,快照版本为1, + // Obtain chunk1 information, with expected version 2 and snapshot version 1, ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, sn1, leader)); - // chunk1读[0, 12KB], 预期读到版本2数据 + // Chunk1 read [0, 12KB], expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b)); - // 读取chunk1的快照, 预期读到版本1数据 + // Reading snapshot of chunk1, expected to read version 1 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, &chunkData1a)); - // 读取chunk2的快照, 预期chunk不存在 + // Reading snapshot of chunk2, expected chunk not to exist ASSERT_EQ(0, verify->VerifyReadChunkSnapshot( chunk2, sn1, 0, 12 * KB, nullptr)); /* - * 场景二:第一次快照结束,删除快照 + * Scenario 2: The first snapshot ends and the snapshot is deleted */ - // 删除chunk1快照 + // Delete chunk1 snapshot ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn2)); - // 获取chunk1信息,预期其版本为2,无快照版本 + // Obtain chunk1 information, expect its version to be 2, no snapshot version 
ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, NULL_SN, leader)); - // 删chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); - // chunk2写[0, 8KB] + // Chunk2 write [0, 8KB] data.assign(length, 'f'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn2, 0, 8 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为2,无快照版本 + // Obtain chunk2 information, expect its version to be 2, no snapshot version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); /* - * 场景三:第二次打快照 + * Scenario 3: Taking a second snapshot */ - // chunk1写[0, 8KB] - chunkData1c.assign(chunkData1b); // 模拟对chunk1数据进行COW + // Chunk1 write [0, 8KB] + chunkData1c.assign(chunkData1b); //Simulate COW on chunk1 data data.assign(length, 'g'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn3, 0, 8 * KB, data.c_str(), &chunkData1c)); - // 获取chunk1信息,预期其版本为3,快照版本为2 + // Obtain chunk1 information, expect its version to be 3 and snapshot version to be 2 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn3, sn2, leader)); - // 读取chunk1的快照, 预期读到版本2数据 + // Reading snapshot of chunk1, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn2, 0, 12 * KB, &chunkData1b)); - // 读取chunk2的快照, 预期读到版本2数据 + // Reading snapshot of chunk2, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn2, 0, 8 * KB, &chunkData2)); - // 删除chunk1文件,预期成功,本地快照存在的情况下,会将快照也一起删除 + // Delete chunk1 file, expected success. If the local snapshot exists, the snapshot will also be deleted together ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, verify->VerifyDeleteChunk(chunk1, sn3)); /* - * 场景四:第二次快照结束,删除快照 + * Scenario 4: The second snapshot ends and the snapshot is deleted */ - // 删除chunk1快照,因为chunk1及其快照上一步已经删除,预期成功 + // Delete chunk1 snapshot because chunk1 and its snapshot have been deleted in the previous step and are expected to succeed ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); - // 获取chunk1信息,预期不存在 + // Obtaining chunk1 information, expected not to exist ASSERT_EQ(0, verify->VerifyGetChunkInfo( chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); - // 获取chunk2信息,预期其版本为2,无快照版本 + // Obtain chunk2 information, expect its version to be 2, no snapshot version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'h'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + // Obtain chunk2 information, expect its version to be 3, no snapshot version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'i'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + // Obtain chunk2 information, expect its version to be 3, no snapshot version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); /* - * 场景五:用户删除文件 + * Scenario 5: User deletes files */ - // 删除chunk1,已不存在,预期成功 + // Delete chunk1, it no longer exists, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, verify->VerifyDeleteChunk(chunk1, sn3)); - // 获取chunk1信息,预期不存在 + // Obtaining chunk1 information, expected not to exist 
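// (Illustrative sketch, not part of the patch.) The copy-on-write rule that the scenario
// comments above rely on: the first write carrying a sequence number newer than the chunk's
// current version snapshots the old data before the version is bumped; repeated writes under
// the same version do not COW again. The types below are simplified assumptions for
// illustration, not the real chunkserver datastore code.
#include <cstdint>
#include <string>

struct ChunkSketch {
    uint64_t sn = 0;        // current chunk version
    uint64_t snapSn = 0;    // snapshot version, 0 means "no snapshot"
    std::string data;
    std::string snapshot;
};

inline void WriteWithVersionSketch(ChunkSketch* c, uint64_t writeSn,
                                   const std::string& newData) {
    if (writeSn > c->sn) {
        c->snapshot = c->data;  // COW: keep the old version for snapshot reads
        c->snapSn = c->sn;
        c->sn = writeSn;
    }
    c->data = newData;  // simplified: a real write only patches a byte range
}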
ASSERT_EQ(0, verify->VerifyGetChunkInfo( chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2,预期成功 + // Delete chunk2, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, verify->VerifyDeleteChunk(chunk2, sn3)); - // 获取chunk2信息,预期不存在 + // Obtaining chunk2 information, expected not to exist ASSERT_EQ(0, verify->VerifyGetChunkInfo( chunk2, NULL_SN, NULL_SN, leader)); } diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index 58ce282336..21304958a7 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -217,11 +217,11 @@ class CSCloneRecoverTest : public ::testing::Test { s3Conf); cluster_->PrepareConfig(MDS0_CONF_PATH, mdsFileConf0); - // 生成chunkserver配置文件 + // Generate chunkserver configuration file cluster_->PrepareConfig(CHUNKSERVER_CONF_PATH, csCommonConf); - // 1. 启动etcd + // 1. Start etcd LOG(INFO) << "begin to start etcd"; pid_t pid = cluster_->StartSingleEtcd( 1, ETCD_CLIENT_IP_PORT, ETCD_PEER_IP_PORT, @@ -231,14 +231,14 @@ class CSCloneRecoverTest : public ::testing::Test { ASSERT_GT(pid, 0); ASSERT_TRUE(cluster_->WaitForEtcdClusterAvalible(5)); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other two mds nodes pid = cluster_->StartSingleMDS(0, MDS0_IP_PORT, MDS0_DUMMY_PORT, mdsConf0, true); LOG(INFO) << "mds 0 started on " + MDS0_IP_PORT + ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 生成topo.json + // Generate topo.json Json::Value topo; Json::Value servers; std::string chunkServerIpPort[] = {CHUNK_SERVER0_IP_PORT, @@ -278,7 +278,7 @@ class CSCloneRecoverTest : public ::testing::Test { topoConf << topo.toStyledString(); topoConf.close(); - // 3. 创建物理池 + // 3. Creating a physical pool string createPPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -297,7 +297,7 @@ class CSCloneRecoverTest : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkserve pid = cluster_->StartSingleChunkServer(1, CHUNK_SERVER0_IP_PORT, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " + CHUNK_SERVER0_IP_PORT + @@ -319,7 +319,7 @@ class CSCloneRecoverTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the underlying copyset select the primary first string createLPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -338,7 +338,7 @@ class CSCloneRecoverTest : public ::testing::Test { ASSERT_EQ(ret, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 获取chunkserver主节点 + // Obtain the chunkserver master node logicPoolId_ = 1; copysetId_ = 1; ASSERT_EQ(0, chunkSeverGetLeader()); @@ -346,12 +346,12 @@ class CSCloneRecoverTest : public ::testing::Test { copysetId_, 5000 }; opConf_ = conf0; - // 6. 初始化client配置 + // 6. Initialize client configuration LOG(INFO) << "init globalclient"; ret = Init(clientConfPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 先睡眠5s,让chunkserver选出leader + // 7. 
Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); s3Adapter_.Init(kS3ConfigPath); @@ -417,10 +417,10 @@ class CSCloneRecoverTest : public ::testing::Test { system(("mkdir " + CHUNKSERVER2_BASE_DIR + "/filepool").c_str())); } - /**下发一个写请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否成功完成 + /** Send a write request and wait for completion + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @return: Is IO successfully completed */ bool HandleAioWriteRequest(uint64_t offset, uint64_t size, const char* data) { @@ -432,7 +432,7 @@ class CSCloneRecoverTest : public ::testing::Test { char* buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it returns, it triggers cond gCond.Signal(); }; @@ -460,11 +460,11 @@ class CSCloneRecoverTest : public ::testing::Test { return true; } - /**下发一个读请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @data: 读出的数据 - * @return: IO是否成功完成 + /** Send a read request and wait for completion + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @data: Read out data + * @return: Is IO successfully completed */ bool HandleAioReadRequest(uint64_t offset, uint64_t size, char* data) { gCond.Reset(1); @@ -473,7 +473,7 @@ class CSCloneRecoverTest : public ::testing::Test { auto readCallBack = [](CurveAioContext* context) { gIoRet = context->ret; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it returns, it triggers cond gCond.Signal(); }; @@ -547,7 +547,7 @@ class CSCloneRecoverTest : public ::testing::Test { return -1; } - // 先睡眠5s,让chunkserver选出leader + // Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); status = curve::chunkserver::GetLeader(logicPoolId_, copysetId_, csConf, &leaderPeer_); @@ -559,26 +559,26 @@ class CSCloneRecoverTest : public ::testing::Test { } void prepareSourceDataInCurve() { - // 创建一个curveFS文件 + // Create a curveFS file LOG(INFO) << "create source curveFS file: " << CURVEFS_FILENAME; fd_ = curve::test::FileCommonOperation::Open(CURVEFS_FILENAME, "curve"); ASSERT_NE(fd_, -1); - // 写数据到curveFS的第1个chunk + // Write data to the first chunk of curveFS LOG(INFO) << "Write first 16MB of source curveFS file"; ASSERT_TRUE(HandleAioWriteRequest(0, kChunkSize, chunkData1_.c_str())); - // 读出数据进行验证 + // Read data for verification std::unique_ptr temp(new char[kChunkSize]); ASSERT_TRUE(HandleAioReadRequest(0, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData1_.c_str(), temp.get(), kChunkSize)); - // 写数据到curveFS的第2个chunk + // Write data to the second chunk of curveFS LOG(INFO) << "Write second 16MB of source curveFS file"; ASSERT_TRUE( HandleAioWriteRequest(kChunkSize, kChunkSize, chunkData2_.c_str())); - // 读出数据进行验证 + // Read data for verification ASSERT_TRUE(HandleAioReadRequest(kChunkSize, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData2_.c_str(), temp.get(), kChunkSize)); @@ -613,14 +613,14 @@ class CSCloneRecoverTest : public ::testing::Test { bool s3ObjExisted_; }; -// 场景一:通过ReadChunk从curve恢复克隆文件 +// Scenario 1: Restore clone files from curve through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { LOG(INFO) << "current case: 
CloneFromCurveByReadChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; ChunkID cloneChunk2 = 332; @@ -633,7 +633,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -647,7 +647,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -667,7 +667,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, kChunkServerMaxIoSize, @@ -675,9 +675,9 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { } /** - * clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Clone files will not be converted to regular chunk1 files after being read through + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -685,19 +685,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景二:通过RecoverChunk从curve恢复克隆文件 +// Scenario 2: Restore cloned files from curve through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { LOG(INFO) << "current case: CloneFromCurveByRecoverChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 333; ChunkID cloneChunk2 = 334; @@ -710,7 +710,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -724,7 +724,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. 
Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -750,16 +750,16 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件,写成功。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a snapshot file will be generated and successfully written. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -767,19 +767,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景三:lazy allocate场景下读克隆文件 +// Scenario 3: Lazy allocate scenario: Reading clone files TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { LOG(INFO) << "current case: CloneFromCurveByReadChunkWhenLazyAlloc"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. chunk文件不存在 + // 1. Chunk file does not exist ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; SequenceNum sn1 = 1; @@ -802,9 +802,9 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 将leader切换到follower + // Switch leader to follower ASSERT_EQ(0, TransferLeaderToFollower()); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); @@ -817,7 +817,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk string ioBuf(kChunkServerMaxIoSize, 'c'); for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyWriteChunk( @@ -828,9 +828,9 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { cloneData1.get(), CURVEFS_FILENAME, 0)); /** - * clone文件遍写后会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * The clone file will be converted to a regular chunk1 file after being overwritten + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -838,18 +838,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); } -// 场景四:通过ReadChunk从S3恢复克隆文件 +// Scenario 4: Restore cloned files from S3 through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { LOG(INFO) << "current case: CloneFromS3ByReadChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 335; ChunkID cloneChunk2 = 336; @@ -862,7 +862,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -875,7 +875,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -895,7 +895,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, kChunkServerMaxIoSize, @@ -903,9 +903,9 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { } /** - * 预期clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * It is expected that the clone file will not be converted to a regular chunk1 file after being read through + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -913,19 +913,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景五:通过RecoverChunk从S3恢复克隆文件 +// Scenario 5: Restore cloned files from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { LOG(INFO) << "current case: CloneFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 337; ChunkID cloneChunk2 = 338; @@ -938,7 +938,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -951,7 +951,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 
通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -977,16 +977,16 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -994,19 +994,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景六:通过ReadChunk从S3恢复 +// Scenario 6: Restore from S3 through ReadChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { LOG(INFO) << "current case: RecoverFromS3ByReadChunk"; - // 0. 构造数据上传到S3,模拟转储 + // 0. Upload construction data to S3 and simulate dump prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; SequenceNum sn2 = 2; @@ -1018,13 +1018,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, cloneData1.get())); @@ -1044,7 +1044,7 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, kChunkServerMaxIoSize, @@ -1052,9 +1052,9 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { } /** - * 预期clone文件不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file not to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1062,18 +1062,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn3)); } -// 场景七:通过RecoverChunk从S3恢复 +// Scenario 7: Recovering from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { LOG(INFO) << "current case: RecoverFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; SequenceNum sn2 = 2; @@ -1085,13 +1085,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, @@ -1117,16 +1117,16 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version, + * If it is a clone chunk, the write will fail; If it is a regular chunk, a snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1134,7 +1134,7 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn4)); } diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index b38f819da7..6c859000c9 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -94,7 +94,7 @@ const int kChunkNum = 10; const ChunkSizeType kChunkSize = 16 * 1024 * 1024; const PageSizeType kPageSize = kOpRequestAlignSize; -// chunk不从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency testing for chunks not obtained from FilePool class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -143,7 +143,7 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -165,7 +165,7 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { std::vector params; }; -// chunk从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency test obtained by chunk from FilePool class ChunkServerConcurrentFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -197,7 +197,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { params.push_back(const_cast(chunkConcurrencyParams2[0])); - // 初始化FilePool,这里会预先分配一些chunk + // Initialize FilePool, where some chunks will be pre allocated lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) @@ -230,7 +230,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -256,7 +256,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { std::shared_ptr lfs; }; -// 写chunk +// Write chunk int WriteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, @@ -298,7 +298,7 @@ int WriteChunk(Peer leader, return 0; } -// 随机选择一个chunk的随机offset进行read +// Randomly select a chunk's random offset for read void RandReadChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, @@ -313,7 +313,7 @@ void RandReadChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -329,7 +329,7 @@ void RandReadChunk(Peer leader, request.set_size(kOpRequestAlignSize); request.set_appliedindex(appliedIndex); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -351,7 +351,7 
@@ void RandReadChunk(Peer leader, } } -// 随机选择一个chunk的随机offset进行write +// Randomly select a chunk's random offset for writing void RandWriteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, @@ -368,7 +368,7 @@ void RandWriteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -384,7 +384,7 @@ void RandWriteChunk(Peer leader, request.set_size(kOpRequestAlignSize); cntl.request_attachment().append(data, length); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -405,7 +405,7 @@ void RandWriteChunk(Peer leader, } } -// 随机选择一个chunk删除 +// Randomly select a chunk to delete void RandDeleteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, @@ -419,7 +419,7 @@ void RandDeleteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -449,7 +449,7 @@ void RandDeleteChunk(Peer leader, } } -// 创建clone chunk +// Create clone chunk void CreateCloneChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, @@ -496,10 +496,10 @@ void CreateCloneChunk(Peer leader, } /** - * chunk不是事先在FilePool分配好的 + *Chunks are not pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -510,7 +510,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -521,7 +521,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, @@ -531,7 +531,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { data.c_str(), sn)); - // 3. 起多个线程执行随机read chunk + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandReadChunk, @@ -548,14 +548,14 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -565,7 +565,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. 
Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandWriteChunk, @@ -582,7 +582,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset +// Multiple threads simultaneously writing the same chunk and offset TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT const int kThreadNum = 10; std::vector datas; @@ -591,7 +591,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -601,7 +601,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); @@ -621,7 +621,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 3. 将数据read出来验证 + // 3. Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -645,7 +645,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data>='a 'and<='a'+kThreadNum -1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -653,7 +653,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -663,7 +663,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -673,7 +673,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, @@ -685,12 +685,12 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { sn)); } - // 3. 起多个线程执行随机read write chunk + // 3. 
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 + // Start read thread threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, @@ -699,7 +699,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { kMaxLoop, sn)); } else { - // 起write线程 + // Start write thread threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, @@ -715,7 +715,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -725,7 +725,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -735,7 +735,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, @@ -747,7 +747,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { sn)); } - // 3. 起多个线程执行随机read chunk + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandReadChunk, @@ -764,14 +764,14 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 +// Multiple threads concurrently read different chunks; note that none of these chunks have been written yet TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -781,7 +781,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandReadChunk, @@ -798,7 +798,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently perform random writes to multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -808,7 +808,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -818,8 +818,8 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生,避免下面同时从 - // chunkfile pool生成new chunk导致write 超时失败 + // 2. Initiate a write to each chunk to ensure that the chunk has already been generated, avoiding write + // timeouts caused by concurrently allocating new chunks from the chunkfile pool below for (int k = 1; k < chunkIdRange + 1; ++k) { ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, @@ -831,7 +831,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { sn)); } - // 4. 起多个线程执行随机write chunk + // 4. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandWriteChunk, @@ -848,7 +848,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently perform random reads and writes on multiple chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -856,7 +856,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -866,12 +866,12 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 + // Start read thread threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, @@ -880,7 +880,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { kMaxLoop, sn)); } else { - // 起write线程 + // Start write thread threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, @@ -896,7 +896,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -906,7 +906,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -916,7 +916,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, @@ -928,10 +928,10 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. 
Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 + // Start delete thread threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, @@ -945,12 +945,12 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrent create clones with different chunks TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -960,7 +960,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { @@ -978,10 +978,10 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) { } /** - * chunk是事先在FilePool分配好的 + * Chunks are pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -992,7 +992,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1002,7 +1002,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, @@ -1012,7 +1012,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { data.c_str(), sn)); - // 3. 起多个线程执行随机read chunk + // 3. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandReadChunk, @@ -1029,14 +1029,14 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1046,7 +1046,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. 
Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandWriteChunk, @@ -1063,7 +1063,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset +// Multiple threads simultaneously writing the same chunk and offset TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT const int kThreadNum = 10; std::vector datas; @@ -1072,7 +1072,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1082,7 +1082,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); @@ -1102,7 +1102,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 4. 将数据read出来验证 + // 4. Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -1126,7 +1126,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data >='a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -1134,7 +1134,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1142,7 +1142,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1152,12 +1152,12 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. 
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 + // Start read thread threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, @@ -1166,7 +1166,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { kMaxLoop, sn)); } else { - // 起write线程 + // Start write thread threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, @@ -1182,7 +1182,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1192,7 +1192,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1202,7 +1202,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, @@ -1214,7 +1214,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { sn)); } - // 4. 起多个线程执行随机read chunk + // 4. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandReadChunk, @@ -1231,14 +1231,14 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 +// Multiple threads concurrently read different chunks; note that none of these chunks have been written yet TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1248,7 +1248,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandReadChunk, @@ -1265,7 +1265,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { } } -// 多线程并发随机写同多个chunk +// Multiple threads concurrently perform random writes to multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1273,7 +1273,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1283,7 +1283,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { threads.push_back(Thread(RandWriteChunk, @@ -1300,7 +1300,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) { } } -// 多线程并发随机读写同多个chunk +// Multiple threads concurrently perform random reads and writes on multiple chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1308,7 +1308,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1318,12 +1318,12 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 + // Start read thread threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, @@ -1332,7 +1332,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { kMaxLoop, sn)); } else { - // 起write线程 + // Start write thread threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, @@ -1348,7 +1348,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) { } } -// 多线程并发删除不同的chunk +// Multiple threads concurrently delete different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1358,7 +1358,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1368,7 +1368,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, @@ -1380,10 +1380,10 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { sn)); } - // 3. 起多个线程执行随机delete chunk + // 3. Starting multiple threads to execute random delete chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - // 起delete线程 + // Start delete thread threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId, @@ -1397,12 +1397,12 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) { } } -// 多线程并发create clone不同的chunk +// Multiple threads concurrent create clones with different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { const int kThreadNum = 10; ChunkID chunkIdRange = kChunkNum; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1412,7 +1412,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机create clone chunk + // 2. Starting multiple threads to execute random create clone chunks std::vector threads; int chunksPerThread = chunkIdRange / kThreadNum; for (int i = 0; i < kThreadNum; ++i) { @@ -1429,7 +1429,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) { } } -// 多线程并发随机读写同多个chunk,同事伴随这并发的COW +// Multiple threads concurrently perform random reads and writes on multiple chunks, accompanied by concurrent COW TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1439,7 +1439,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { ChunkID chunkIdRange = kChunkNum / 2; int sn = 1; - // 1. 启动一个成员的复制组 + // 1. Start a replication group for a member PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, @@ -1449,7 +1449,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 用低版本的sn写一遍chunk + // 2. Write each chunk once with a lower version sn for (int k = 1; k <= chunkIdRange; ++k) { ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, @@ -1460,15 +1460,15 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { data.c_str(), sn)); } - // sn加1,保证后面的write会产生COW + // Increase sn by 1 so that subsequent writes will trigger COW sn += 1; - // 3. 起多个线程执行随机read write chunk + // 3. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(10); if (read <= 1) { - // 起read线程,20%概率 + // Start read thread with a 20% probability threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, @@ -1477,7 +1477,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) { kMaxLoop, sn)); } else { - // 起write线程 + // Start write thread threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, diff --git a/test/integration/chunkserver/datastore/datastore_basic_test.cpp b/test/integration/chunkserver/datastore/datastore_basic_test.cpp index 14fdc3901c..81a5af7206 100644 --- a/test/integration/chunkserver/datastore/datastore_basic_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_basic_test.cpp @@ -36,8 +36,8 @@ class BasicTestSuit : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * Basic functional testing verification + * Read, write, delete, and obtain file information */ TEST_F(BasicTestSuit, BasicTest) { ChunkID id = 1; @@ -49,25 +49,25 @@ TEST_F(BasicTestSuit, BasicTest) { CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scene One: New file created, Chunk file does not exist******************/ - // 文件不存在 + // File does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // ChunkNotExistError returned when reading chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // Unable to obtain the version number of the chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Deleting the chunk returns Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /****************** 
Scene Two: Operations after generating chunk files through WriteChunk **************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); @@ -80,7 +80,7 @@ TEST_F(BasicTestSuit, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // Chunk information can be obtained and all information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -93,13 +93,13 @@ TEST_F(BasicTestSuit, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Verify that the 4KB read and written should be equal to the data written memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, @@ -108,7 +108,7 @@ TEST_F(BasicTestSuit, BasicTest) { length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // Overwrite when chunk exists char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); @@ -120,7 +120,7 @@ TEST_F(BasicTestSuit, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, @@ -130,7 +130,7 @@ TEST_F(BasicTestSuit, BasicTest) { ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When a chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; @@ -144,7 +144,7 @@ TEST_F(BasicTestSuit, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, @@ -155,7 +155,7 @@ TEST_F(BasicTestSuit, BasicTest) { ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When a chunk exists, it covers some areas char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; @@ -172,7 +172,7 @@ TEST_F(BasicTestSuit, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id, sn, @@ -184,7 +184,7 @@ TEST_F(BasicTestSuit, BasicTest) { ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - /******************场景三:用户删除文件******************/ + /******************Scene Three: User deletes file******************/ errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); diff --git a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp index 3b0d635652..0884b98280 100644 --- a/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp +++ 
b/test/integration/chunkserver/datastore/datastore_clone_case_test.cpp @@ -36,7 +36,7 @@ class CloneTestSuit : public DatastoreIntegrationBase { }; /** - * 克隆场景测试 + * Clone scenario testing */ TEST_F(CloneTestSuit, CloneTest) { ChunkID id = 1; @@ -48,16 +48,16 @@ TEST_F(CloneTestSuit, CloneTest) { CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 + // Create clone file chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -71,14 +71,14 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 + // Call the interface again, but still return success. Chunk information remains unchanged errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -92,14 +92,14 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 创建克隆文件chunk2 + // Create clone file chunk2 errorCode = dataStore_->CreateCloneChunk(2, // chunk id sn, correctedSn, CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(2, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -113,11 +113,11 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ - // 构造原始数据 + /******************Scene 2: Restoring Cloned Files******************/ + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // WriteChunk写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf1[2 * PAGE_SIZE]; @@ -129,7 +129,7 @@ TEST_F(CloneTestSuit, CloneTest) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -137,18 +137,18 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 
0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->PasteChunk(id, @@ -156,7 +156,7 @@ TEST_F(CloneTestSuit, CloneTest) { offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -164,18 +164,18 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘a’ + // Reading Chunk data, [0, 8KB] data should be 'a' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf1, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[4KB, 12KB]区域 + // WriteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; char writeBuf3[2 * PAGE_SIZE]; @@ -187,7 +187,7 @@ TEST_F(CloneTestSuit, CloneTest) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -195,11 +195,11 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 4KB]数据应为‘a’,[4KB, 12KB]数据应为‘c’ + // Reading Chunk data, [0, 4KB] data should be 'a', [4KB, 12KB] data should be 'c' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -207,7 +207,7 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(0, memcmp(writeBuf1, readBuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(writeBuf3, readBuf + PAGE_SIZE, 2 * PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scene 3: Conversion of Cloned Files after Iterative Writing into Regular Chunk Files*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { @@ -217,7 +217,7 @@ TEST_F(CloneTestSuit, CloneTest) { 1 * kMB); // length ASSERT_EQ(errorCode, CSErrorCode::Success); } - // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the information of the chunk and ensure it meets expectations. 
The chunk will be converted to a regular chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -226,15 +226,15 @@ TEST_F(CloneTestSuit, CloneTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - /******************场景三:删除文件****************/ + /******************Scene 3: Delete File****************/ - // 此时删除Chunk1,返回Success + // At this point, delete Chunk1 and return to Success errorCode = dataStore_->DeleteChunk(1, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 此时删除Chunk2,返回Success + // At this point, delete Chunk2 and return to Success errorCode = dataStore_->DeleteChunk(2, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(2, &info); @@ -242,7 +242,7 @@ TEST_F(CloneTestSuit, CloneTest) { } /** - * 恢复场景测试 + * Recovery scenario testing */ TEST_F(CloneTestSuit, RecoverTest) { ChunkID id = 1; @@ -254,16 +254,16 @@ TEST_F(CloneTestSuit, RecoverTest) { CSChunkInfo info; std::string location("test@s3"); - /******************场景一:创建克隆文件******************/ + /******************Scenario 1: Creating Cloned Files******************/ - // 创建克隆文件chunk1 + // Create clone file chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -277,14 +277,14 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 再次调该接口,仍返回成功,chunk的信息不变 + // Call the interface again, but still return success. 
Chunk information remains unchanged errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, 3, // corrected sn CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -298,12 +298,12 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - /******************场景二:恢复克隆文件******************/ + /******************Scene 2: Restoring Cloned Files******************/ sn = 3; - // 构造原始数据 + // Construct raw data char pasteBuf[4 * PAGE_SIZE]; memset(pasteBuf, '1', 4 * PAGE_SIZE); - // PasteChunk写数据到clone chunk的[0, 8KB]区域 + // PasteChunk writes data to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->PasteChunk(id, @@ -311,7 +311,7 @@ TEST_F(CloneTestSuit, RecoverTest) { offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); @@ -319,18 +319,18 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be '1' size_t readSize = 2 * PAGE_SIZE; char readBuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(pasteBuf, readBuf, readSize)); - // WriteChunk再次写数据到clone chunk的[0, 8KB]区域 + // WriteChunk writes data again to the [0, 8KB] area of the clone chunk offset = 0; length = 2 * PAGE_SIZE; char writeBuf2[2 * PAGE_SIZE]; @@ -342,7 +342,7 @@ TEST_F(CloneTestSuit, RecoverTest) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -350,18 +350,18 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(2, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(2)); - // 读Chunk数据,[0, 8KB]数据应为‘b’ + // Reading Chunk data, [0, 8KB] data should be 'b' readSize = 2 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(writeBuf2, readBuf, readSize)); - // PasteChunk再次写数据到clone chunk的[4KB, 12KB]区域 + // PasteChunk writes data again to the [4KB, 12KB] area of the clone chunk offset = PAGE_SIZE; length = 2 * PAGE_SIZE; errorCode = dataStore_->PasteChunk(id, @@ -369,7 +369,7 @@ TEST_F(CloneTestSuit, RecoverTest) { offset, length); 
ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -377,11 +377,11 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(correctedSn, info.correctedSn); ASSERT_EQ(true, info.isClone); ASSERT_NE(nullptr, info.bitmap); - // 写入PAGE对应bit置为1,其余都为0 + // Write the corresponding bit of PAGE to 1, and all other bits are set to 0 ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(3, info.bitmap->NextClearBit(0)); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(3)); - // 读Chunk数据,[0, 8KB]数据应为‘b’,[8KB, 12KB]数据应为‘1’ + // Reading Chunk data, [0, 8KB] data should be 'b', [8KB, 12KB] data should be '1' readSize = 3 * PAGE_SIZE; memset(readBuf, 0, sizeof(readBuf)); errorCode = dataStore_->ReadChunk(id, sn, readBuf, 0, readSize); @@ -389,7 +389,7 @@ TEST_F(CloneTestSuit, RecoverTest) { ASSERT_EQ(0, memcmp(writeBuf2, readBuf, 2 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(pasteBuf, readBuf + 2 * PAGE_SIZE, PAGE_SIZE)); - /******************场景三:clone文件遍写后转换为普通chunk文件*************/ + /******************Scene 3: Convert Cloned Files from Sequential Write to Regular Chunk Files*************/ char overBuf[1 * kMB] = {0}; for (int i = 0; i < 16; ++i) { @@ -401,7 +401,7 @@ TEST_F(CloneTestSuit, RecoverTest) { nullptr); // length ASSERT_EQ(errorCode, CSErrorCode::Success); } - // 检查chunk的各项信息,都符合预期,chunk转为了普通的chunk + // Check all the information of the chunk and ensure it meets expectations. The chunk will be converted to a regular chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); diff --git a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp index e873cdb667..7ec5017200 100644 --- a/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_concurrency_test.cpp @@ -46,7 +46,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { const int kThreadNum = 10; auto readFunc = [&](ChunkID id) { - // 五分之一概率增加版本号 + // One fifth probability of increasing version number if (rand_r(&seed) % 5 == 0) ++sn; uint64_t pageIndex = rand_r(&seed) % (CHUNK_SIZE / PAGE_SIZE); @@ -107,7 +107,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { Thread threads[kThreadNum]; printf("===============TEST CHUNK1===================\n"); - // 测试并发对同一chunk进行随机操作 + // Testing concurrent random operations on the same chunk for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, 1, kLoopNum); } @@ -118,7 +118,7 @@ TEST_F(ConcurrencyTestSuit, ConcurrencyTest) { printf("===============TEST RANDOM==================\n"); - // 测试并发对不同chunk进行随机操作 + // Test and perform random operations on different chunks simultaneously int idRange = 10; for (int i = 0; i < kThreadNum; ++i) { threads[i] = std::thread(Run, idRange, kLoopNum); diff --git a/test/integration/chunkserver/datastore/datastore_exception_test.cpp b/test/integration/chunkserver/datastore/datastore_exception_test.cpp index 5405b03e8c..3069ded295 100644 --- a/test/integration/chunkserver/datastore/datastore_exception_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_exception_test.cpp @@ -36,9 +36,9 @@ class ExceptionTestSuit : public DatastoreIntegrationBase { }; /** - * 异常测试1 - * 用例:chunk的metapage数据损坏,然后启动DataStore - * 预期:重启失败 + * Exception test 
1 + * Scenario: Chunk's metapage data is corrupt, and then start DataStore + * Expected: Reboot failed */ TEST_F(ExceptionTestSuit, ExceptionTest1) { SequenceNum fileSn = 1; @@ -47,7 +47,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest1) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -58,25 +58,25 @@ TEST_F(ExceptionTestSuit, ExceptionTest1) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage + // Modifying the metapage of chunk1 through lfs std::string chunkPath = baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metapage[PAGE_SIZE]; int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify Metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); @@ -84,9 +84,9 @@ TEST_F(ExceptionTestSuit, ExceptionTest1) { } /** - * 异常测试2 - * 用例:chunk的metapage数据损坏,然后更新了metapage,然后重启DataStore - * 预期:重启datastore可以成功 + * Exception Test 2 + * Scenario: Chunk's metapage data is corrupt, then the metapage is updated, and then the DataStore is restarted + * Expected: Restarting the datastore can be successful */ TEST_F(ExceptionTestSuit, ExceptionTest2) { SequenceNum fileSn = 1; @@ -95,7 +95,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest2) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -106,19 +106,19 @@ TEST_F(ExceptionTestSuit, ExceptionTest2) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage + // Modifying the metapage of chunk1 through lfs std::string chunkPath = baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metapage[PAGE_SIZE]; int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify Metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 触发metapage更新 + //Trigger metapage Update errorCode = dataStore_->WriteChunk(1, // id ++fileSn, buf, @@ -127,13 +127,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest2) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); @@ -141,9 +141,9 @@ TEST_F(ExceptionTestSuit, ExceptionTest2) { } /** - * 异常测试3 - * 用例:chunk快照的metapage数据损坏,然后重启DataStore - * 预期:重启失败 + * Exception Test 3 + * Scenario: Chunk snapshot metadata data corruption, then restart DataStore + * Expected: Reboot failed */ TEST_F(ExceptionTestSuit, ExceptionTest3) { SequenceNum fileSn = 1; @@ -152,7 +152,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest3) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); 
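// Exception tests 1-4 corrupt one byte of a metapage through lfs and expect the
// DataStore restart to fail (or to succeed once the page has been rewritten by
// a later write). What makes that detectable is a checksum stored in the
// metapage and verified when the page is decoded on startup. Below is a
// minimal, self-contained sketch of that idea; the layout, the FNV-1a hash and
// the names are simplified stand-ins, not the actual ChunkFileMetaPage encoding
// or its CRC routine.
#include <cstddef>
#include <cstdint>
#include <cstring>

struct SimpleMetaPage {
    uint32_t version;
    uint64_t sn;
    uint64_t correctedSn;
    uint32_t checksum;  // covers every byte that precedes this field
};

inline uint32_t Fnv1a(const unsigned char* data, size_t len) {
    uint32_t hash = 2166136261u;
    for (size_t i = 0; i < len; ++i) {
        hash ^= data[i];
        hash *= 16777619u;
    }
    return hash;
}

// Decode a serialized metapage and verify its checksum; a single flipped byte
// in the covered region breaks the match and shows up as an init failure.
inline bool DecodeAndVerify(const char* buf, SimpleMetaPage* page) {
    std::memcpy(page, buf, sizeof(SimpleMetaPage));
    const uint32_t expected =
        Fnv1a(reinterpret_cast<const unsigned char*>(buf),
              offsetof(SimpleMetaPage, checksum));
    return expected == page->checksum;
}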
errorCode = dataStore_->WriteChunk(1, // id @@ -163,7 +163,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest3) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate snapshot files errorCode = dataStore_->WriteChunk(1, // id ++fileSn, buf, @@ -172,25 +172,25 @@ TEST_F(ExceptionTestSuit, ExceptionTest3) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage + // Modifying the metapage of chunk1 snapshot through lfs std::string snapPath = baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); @@ -198,9 +198,9 @@ TEST_F(ExceptionTestSuit, ExceptionTest3) { } /** - * 异常测试4 - * 用例:chunk快照的metapage数据损坏,但是更新了metapage,然后重启DataStore - * 预期:重启成功 + * Exception Test 4 + * Scenario: Chunk snapshot's metapage data is corrupt, but the metapage is updated, and then restart the DataStore + * Expected: Reboot successful */ TEST_F(ExceptionTestSuit, ExceptionTest4) { SequenceNum fileSn = 1; @@ -209,7 +209,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf[PAGE_SIZE]; memset(buf, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -220,7 +220,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 生成快照文件 + // Generate snapshot files errorCode = dataStore_->WriteChunk(1, // id ++fileSn, buf, @@ -229,7 +229,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 触发快照metapage更新 + //Trigger snapshot metadata update errorCode = dataStore_->WriteChunk(1, // id fileSn, buf, @@ -238,25 +238,25 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1快照的metapage + // Modifying the metapage of chunk1 snapshot through lfs std::string snapPath = baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); char metapage[PAGE_SIZE]; int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); lfs_->Read(fd, metapage, 0, PAGE_SIZE); - // 修改metapage + // Modify Metapage metapage[0]++; lfs_->Write(fd, metapage, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); @@ -264,9 +264,9 @@ TEST_F(ExceptionTestSuit, ExceptionTest4) { } /** - * 异常测试5 - * 用例:WriteChunk数据写到一半重启 - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 5 + * Scenario: WriteChunk data is written halfway and restarted + * Expected: Successful restart, successful re execution of the previous operation */ TEST_F(ExceptionTestSuit, ExceptionTest5) { SequenceNum fileSn = 1; @@ -275,7 +275,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest5) { CSErrorCode 
errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -286,33 +286,33 @@ TEST_F(ExceptionTestSuit, ExceptionTest5) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的数据和请求偏移 + // Construct data to be written and request offset char buf2[2 * PAGE_SIZE]; memset(buf2, '2', length); offset = 0; length = 2 * PAGE_SIZE; - // 通过lfs写一半数据到chunk文件 + // Write half of the data to the chunk file through lfs std::string chunkPath = baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf2, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id fileSn, buf2, @@ -320,7 +320,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest5) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read data verification char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id fileSn, @@ -332,9 +332,9 @@ TEST_F(ExceptionTestSuit, ExceptionTest5) { } /** - * 异常测试6 - * 用例:WriteChunk更新metapage后重启,sn>chunk.sn,sn==chunk.correctedSn - * 预期:重启成功,重新执行上一条操作成功 + * Exception Test 6 + * Scenario: WriteChunk updates the metapage and restarts, sn>chunk.sn,sn==chunk.correctedSn + * Expected: Successful restart, successful re execution of the previous operation */ TEST_F(ExceptionTestSuit, ExceptionTest6) { SequenceNum fileSn = 1; @@ -343,7 +343,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1 + // Generate chunk1 char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -354,18 +354,18 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 更新 correctedsn 为2 + // Update correctedsn to 2 errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(1, 2); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 构造要写入的请求参数 + // Construct request parameters to write char buf2[2 * PAGE_SIZE]; memset(buf2, '2', length); offset = 0; length = 2 * PAGE_SIZE; fileSn = 2; // sn > chunk.sn; sn == chunk.correctedSn - // 通过lfs修改chunk1的metapage + // Modifying the metapage of chunk1 through lfs std::string chunkPath = baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; @@ -373,30 +373,30 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Successfully simulated updating of metapage ChunkFileMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = fileSn; metaPage.encode(metabuf); - // 更新metapage + // Update Metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new 
dataStore_ and reinitialize it dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->WriteChunk(1, // id fileSn, buf2, @@ -404,7 +404,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读数据校验 + // Read data verification char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id fileSn, @@ -416,11 +416,11 @@ TEST_F(ExceptionTestSuit, ExceptionTest6) { } /** - * 异常测试7 - * 用例:WriteChunk产生快照后重启,恢复历史操作和当前操作 + * Exception Test 7 + * Scenario: WriteChunk generates a snapshot and restarts, restoring historical and current operations * sn>chunk.sn, sn>chunk.correctedSn - * 测chunk.sn>chunk.correctedSn - * 预期:重启成功 + * Test chunk.sn>chunk.correctedSn + * Expected: Restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest7) { SequenceNum fileSn = 1; @@ -429,7 +429,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -440,7 +440,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating snapshot files ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -452,19 +452,19 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_ and reinitialize it dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -472,7 +472,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery: replay the previous operation errorCode = dataStore_->WriteChunk(1, // id fileSn, buf1, @@ -480,13 +480,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to check whether COW has happened char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // COW is expected not to have happened ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate log recovery: replay the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); @@ -497,13 +497,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id fileSn, readbuf, @@ -511,7 +511,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - 原数据cow到快照 + // The original data has been COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id 1, readbuf, @@ -522,11 +522,11 @@ TEST_F(ExceptionTestSuit, ExceptionTest7) { } /** - * 异常测试8 - * 用例:WriteChunk产生快照后重启, + * Exception Test 8 + * Scenario: WriteChunk generates a snapshot and restarts, * sn>chunk.sn, sn>chunk.correctedSn - * 测chunk.sn==chunk.correctedSn - * 预期:重启成功 + * Test chunk.sn==chunk.correctedSn + * Expected: Restart succeeds */ TEST_F(ExceptionTestSuit, ExceptionTest8) { SequenceNum fileSn = 1; @@ -535,7 +535,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,构造chunk.sn==chunk.correctedsn的场景 + // Generate chunk1 and construct a scenario where chunk.sn==chunk.correctedsn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -555,7 +555,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating snapshot files ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 2; @@ -567,19 +567,19 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_ and reinitialize it dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -587,7 +587,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate log recovery: replay the previous operation errorCode = dataStore_->WriteChunk(1, // id fileSn, buf1, @@ -595,13 +595,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 读快照文件来校验是否有cow + // Read the snapshot file to check whether COW has happened char readbuf[2 * PAGE_SIZE]; snapshot.Read(readbuf, offset, length); - // 预期未发生cow + // COW is expected not to have happened ASSERT_NE(0, memcmp(buf1, readbuf, length)); - // 模拟恢复最后一条操作 + // Simulate log recovery: replay the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); @@ -612,13 +612,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(3, info.curSn); ASSERT_EQ(2, info.snapSn); ASSERT_EQ(2, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten errorCode = dataStore_->ReadChunk(1, // id fileSn, readbuf, @@ -626,7 +626,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // The original data has been COWed to the snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id 2, readbuf, @@ -637,10 +637,10 @@ TEST_F(ExceptionTestSuit, ExceptionTest8) { } /** - * 异常测试9 - * 用例:WriteChunk产生快照并更新metapage后重启,恢复历史操作和当前操作 + * Exception Test 9 + * Scenario: WriteChunk generates a snapshot and updates the metapage before restarting, restoring historical
and current operations * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Expected: Reboot successful */ TEST_F(ExceptionTestSuit, ExceptionTest9) { SequenceNum fileSn = 1; @@ -649,7 +649,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where chunk.sn>chunk.correctedSn char buf1[PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -660,7 +660,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟创建快照文件 + // Simulate creating snapshot files ChunkOptions chunkOption; chunkOption.id = 1; chunkOption.sn = 1; @@ -672,7 +672,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { errorCode = snapshot.Open(true); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 通过lfs修改chunk1的metapage + // Modifying the metapage of chunk1 through lfs std::string chunkPath = baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); char metabuf[PAGE_SIZE]; @@ -680,30 +680,30 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { ASSERT_GT(fd, 0); lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 模拟更新metapage成功 + // Successfully simulated updating of metapage ChunkFileMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, metaPage.sn); metaPage.sn = 2; metaPage.encode(metabuf); - // 更新metapage + // Update Metapage lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -711,7 +711,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复前一条操作 + // Simulate the previous operation of log recovery errorCode = dataStore_->WriteChunk(1, // id fileSn, buf1, @@ -720,7 +720,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation fileSn++; char buf2[PAGE_SIZE]; memset(buf2, '2', length); @@ -731,13 +731,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查是否更新了版本号 + // Check if the version number has been updated errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id fileSn, @@ -746,7 +746,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // Raw data cow to snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id 1, readbuf, @@ -757,10 +757,10 @@ TEST_F(ExceptionTestSuit, ExceptionTest9) { } /** - * 异常测试10 - * 用例:WriteChunk更新快照metapage前重启,恢复历史操作和当前操作 + * Exception Test 10 + * Scenario: WriteChunk restarts before updating 
the snapshot metapage to restore historical and current operations * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Expected: Reboot successful */ TEST_F(ExceptionTestSuit, ExceptionTest10) { SequenceNum fileSn = 1; @@ -769,7 +769,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -780,7 +780,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; @@ -794,17 +794,17 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow + // Simulate Cow std::string snapPath = baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); - // 更新metapage + // Update Metapage char metabuf[PAGE_SIZE]; lfs_->Read(fd, metabuf, 0, PAGE_SIZE); - // 修改metapage + // Modify Metapage SnapshotMetaPage metaPage; errorCode = metaPage.decode(metabuf); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -813,19 +813,19 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { lfs_->Write(fd, metabuf, 0, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -833,7 +833,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log recovery offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id @@ -843,7 +843,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate recovery of the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn @@ -853,7 +853,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn @@ -862,13 +862,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check if the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; @@ -879,7 +879,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // Raw data cow to snapshot errorCode = 
dataStore_->ReadSnapshotChunk(1, // id 1, readbuf, @@ -890,10 +890,10 @@ TEST_F(ExceptionTestSuit, ExceptionTest10) { } /** - * 异常测试11 - * 用例:WriteChunk更新快照metapage后重启,恢复历史操作和当前操作 + * Exception Test 11 + * Scenario: WriteChunk updates snapshot metadata and restarts to restore historical and current operations * sn>chunk.sn, sn>chunk.correctedSn - * 预期:重启成功 + * Expected: Reboot successful */ TEST_F(ExceptionTestSuit, ExceptionTest11) { SequenceNum fileSn = 1; @@ -902,7 +902,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { CSErrorCode errorCode; CSChunkInfo chunk1Info; - // 生成chunk1,模拟chunk.sn>chunk.correctedSn的情况 + // Generate chunk1 and simulate the situation where chunk.sn>chunk.correctedSn char buf1[2 * PAGE_SIZE]; memset(buf1, '1', length); errorCode = dataStore_->WriteChunk(1, // id @@ -913,7 +913,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 产生快照文件 + // Generate snapshot file fileSn++; length = PAGE_SIZE; char buf2[2 * PAGE_SIZE]; @@ -927,28 +927,28 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟cow + // Simulate Cow std::string snapPath = baseDir + "/" + FileNameOperator::GenerateSnapshotName(1, 1); int fd = lfs_->Open(snapPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, 2 * PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.metaPageSize = PAGE_SIZE; options.blockSize = BLOCK_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查是否加载了快照信息 + // Check if snapshot information is loaded CSChunkInfo info; errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -956,7 +956,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // 模拟日志恢复 + // Simulate log recovery offset = 0; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id @@ -966,7 +966,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::BackwardRequestError); - // 模拟恢复下一个操作 + // Simulate recovery of the next operation length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn @@ -976,7 +976,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 模拟恢复最后一条操作 + // Simulate recovery of the last operation offset = PAGE_SIZE; errorCode = dataStore_->WriteChunk(1, // id 2, // sn @@ -985,13 +985,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk 信息是否正确 + // Check if the chunk information is correct errorCode = dataStore_->GetChunkInfo(1, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, info.curSn); ASSERT_EQ(1, info.snapSn); ASSERT_EQ(0, info.correctedSn); - // chunk数据被覆盖 + // chunk data is overwritten char readbuf[2 * PAGE_SIZE]; offset = 0; length = 2 * PAGE_SIZE; @@ -1002,7 +1002,7 @@ TEST_F(ExceptionTestSuit, ExceptionTest11) { length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2, readbuf, length)); - // 原数据cow到快照 + // Raw data cow to snapshot errorCode = dataStore_->ReadSnapshotChunk(1, // id 1, readbuf, @@ -1013,9 +1013,9 @@ TEST_F(ExceptionTestSuit, 
ExceptionTest11) { } /** - * 异常测试12 - * 用例:PasteChunk,数据写入一半时,还未更新metapage重启/崩溃 - * 预期:重启成功,paste成功 + * Exception Test 12 + * Scenario: PasteChunk, when data is written halfway and the metapage has not been updated, restart/crash + * Expected: Reboot successful, pass successful */ TEST_F(ExceptionTestSuit, ExceptionTest12) { ChunkID id = 1; @@ -1027,14 +1027,14 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { CSChunkInfo info; std::string location("test@s3"); - // 创建克隆文件chunk1 + // Create clone file chunk1 errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn CHUNK_SIZE, location); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk的各项信息,都符合预期 + // Check all the information of the chunk and ensure it meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(sn, info.curSn); @@ -1048,33 +1048,33 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { ASSERT_NE(nullptr, info.bitmap); ASSERT_EQ(Bitmap::NO_POS, info.bitmap->NextSetBit(0)); - // 构造要写入的数据和请求偏移 + // Construct data to be written and request offset char buf1[PAGE_SIZE]; memset(buf1, '1', length); offset = 0; length = PAGE_SIZE; - // 通过lfs写数据到chunk文件 + // Write data to chunk file through lfs std::string chunkPath = baseDir + "/" + FileNameOperator::GenerateChunkFileName(1); int fd = lfs_->Open(chunkPath, O_RDWR|O_NOATIME|O_DSYNC); ASSERT_GT(fd, 0); - // 写数据 + // Write data lfs_->Write(fd, buf1, offset + PAGE_SIZE, PAGE_SIZE); lfs_->Close(fd); - // 模拟重启 + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.blockSize = BLOCK_SIZE; options.metaPageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化,重启失败 + // Construct a new dataStore_, And reinitialize, restart failed dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 模拟日志恢复 + // Simulate log recovery errorCode = dataStore_->CreateCloneChunk(id, // chunk id sn, correctedSn, // corrected sn @@ -1087,13 +1087,13 @@ TEST_F(ExceptionTestSuit, ExceptionTest12) { offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查bitmap + // Check Bitmap errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, info.bitmap->NextSetBit(0)); ASSERT_EQ(1, info.bitmap->NextClearBit(0)); - // 读数据校验 + // Read data verification char readbuf[2 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(1, // id sn, diff --git a/test/integration/chunkserver/datastore/datastore_integration_base.h b/test/integration/chunkserver/datastore/datastore_integration_base.h index 0731eb39cd..0b93a693ad 100644 --- a/test/integration/chunkserver/datastore/datastore_integration_base.h +++ b/test/integration/chunkserver/datastore/datastore_integration_base.h @@ -60,7 +60,7 @@ extern const string poolDir; // NOLINT extern const string poolMetaPath; // NOLINT /** - * DataStore层集成LocalFileSystem层测试 + * Datastore layer integration LocalFileSystem layer testing */ class DatastoreIntegrationBase : public testing::Test { public: diff --git a/test/integration/chunkserver/datastore/datastore_integration_test.cpp b/test/integration/chunkserver/datastore/datastore_integration_test.cpp index 52693dfa9e..63bcdd5e3c 100644 --- a/test/integration/chunkserver/datastore/datastore_integration_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_integration_test.cpp @@ -39,8 +39,8 @@ class DatastoreIntegrationTest : public DatastoreIntegrationBase { }; /** - * 基本功能测试验证 - * 读、写、删、获取文件信息 + * 
Basic functional testing verification + * Read, write, delete, and obtain file information */ TEST_F(DatastoreIntegrationTest, BasicTest) { ChunkID id = 1; @@ -52,29 +52,29 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { CSErrorCode errorCode; CSChunkInfo info; - /******************场景一:新建的文件,Chunk文件不存在******************/ + /******************Scenario 1: New File Created, Chunk File Does Not Exist******************/ - // 文件不存在 + // File does not exist ASSERT_FALSE(lfs_->FileExists(chunkPath)); - // 读chunk时返回ChunkNotExistError + // chunkNotExistError returned when reading chunk char readbuf[3 * PAGE_SIZE]; errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 无法获取到chunk的版本号 + // Unable to obtain the version number of the chunk errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 删除chunk返回Success + // Delete chunk and return Success errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景二:通过WriteChunk产生chunk文件后操作**************/ + /******************Scene 2: Operations after generating chunk files via WriteChunk.**************/ char buf1_1_1[PAGE_SIZE]; memset(buf1_1_1, 'a', length); - // 第一次WriteChunk会产生chunk文件 + // The first WriteChunk will generate a chunk file errorCode = dataStore_->WriteChunk(id, sn, buf1_1_1, @@ -83,7 +83,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk的信息,且各项信息符合预期 + // Chunk information can be obtained and all information meets expectations errorCode = dataStore_->GetChunkInfo(id, &info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(1, info.curSn); @@ -95,12 +95,12 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { ASSERT_EQ(false, info.isClone); ASSERT_EQ(nullptr, info.bitmap); - // 读取写入的4KB验证一下,应当与写入数据相等 + // Verify that the 4KB read and written should be equal to the data written errorCode = dataStore_->ReadChunk(id, sn, readbuf, offset, length); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_1, readbuf, length)); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed errorCode = dataStore_->ReadChunk(id, sn, readbuf, @@ -108,7 +108,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { length); ASSERT_EQ(errorCode, CSErrorCode::Success); - // chunk 存在时,覆盖写 + // Overwrite when chunk exists char buf1_1_2[PAGE_SIZE]; memset(buf1_1_2, 'b', length); errorCode = dataStore_->WriteChunk(id, @@ -119,7 +119,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed errorCode = dataStore_->ReadChunk(id, sn, readbuf, @@ -128,7 +128,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, length)); - // chunk 存在时,写入未写过区域 + // When a chunk exists, write to an unwritten area char buf1_1_3[PAGE_SIZE]; memset(buf1_1_3, 'c', length); offset = PAGE_SIZE; @@ -141,7 +141,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed errorCode = dataStore_->ReadChunk(id, sn, readbuf, @@ -151,7 +151,7 @@ 
TEST_F(DatastoreIntegrationTest, BasicTest) { ASSERT_EQ(0, memcmp(buf1_1_2, readbuf, PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_1_3, readbuf + PAGE_SIZE, PAGE_SIZE)); - // chunk 存在时,覆盖部分区域 + // When a chunk exists, it covers some areas char buf1_1_4[2 * PAGE_SIZE]; memset(buf1_1_4, 'd', length); offset = PAGE_SIZE; @@ -164,7 +164,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 没被写过的区域也可以读,但是不保证读到的数据内容 + // Areas that have not been written can also be read, but the data content read is not guaranteed errorCode = dataStore_->ReadChunk(id, sn, readbuf, @@ -175,7 +175,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { ASSERT_EQ(0, memcmp(buf1_1_4, readbuf + PAGE_SIZE, 2 * PAGE_SIZE)); - /******************场景三:用户删除文件******************/ + /******************Scene 3: User Deletes File******************/ errorCode = dataStore_->DeleteChunk(id, sn); ASSERT_EQ(errorCode, CSErrorCode::Success); @@ -185,7 +185,7 @@ TEST_F(DatastoreIntegrationTest, BasicTest) { } /** - * 重启恢复测试 + * Restart Recovery Test */ TEST_F(DatastoreIntegrationTest, RestartTest) { SequenceNum fileSn = 1; @@ -196,7 +196,7 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { CSChunkInfo info3; std::string location("test@s3"); - // 构造要用到的读写缓冲区 + // Construct read and write buffers to be used char buf1_1[2 * PAGE_SIZE]; memset(buf1_1, 'a', length); char buf2_1[2 * PAGE_SIZE]; @@ -212,7 +212,7 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { size_t readSize = 4 * PAGE_SIZE; char readBuf[4 * PAGE_SIZE]; - // 各个操作对应的错误码返回值,错误码命名格式为 e_optype_chunid_sn + // The error code return value corresponding to each operation, and the error code naming format is e_optype_chunid_sn CSErrorCode e_write_1_1; CSErrorCode e_write_2_1; CSErrorCode e_write_2_2; @@ -224,51 +224,51 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { CSErrorCode e_delsnap_2_3; CSErrorCode e_clone_3_1; - // 模拟所有用户请求,用lamdba函数可以用于验证日志恢复时重用这部分代码 - // 如果后面要加用例,只需要在函数内加操作即可 + // Simulate all user requests and use the lamdba function to validate the reuse of this code during log recovery + // If you want to add use cases later, you only need to add operations within the function auto ApplyRequests = [&]() { fileSn = 1; - // 模拟普通文件操作,WriteChunk产生chunk1、chunk2 + // Simulate ordinary file operations, WriteChunk generates chunk1, chunk2 offset = 0; length = 2 * PAGE_SIZE; - // 产生chunk1 + //Generate chunk1 e_write_1_1 = dataStore_->WriteChunk(1, // chunk id fileSn, buf1_1, offset, length, nullptr); - // 产生chunk2 + //Generate chunk2 e_write_2_1 = dataStore_->WriteChunk(2, // chunk id fileSn, buf1_1, offset, length, nullptr); - // 删除chunk1 + // Delete chunk1 e_del_1_1 = dataStore_->DeleteChunk(1, fileSn); - // 模拟快照操作 + // Simulate snapshot operations ++fileSn; offset = 1 * PAGE_SIZE; length = 2 * PAGE_SIZE; - // 写chunk2,产生快照文件 + // Write chunk2 to generate a snapshot file e_write_2_2 = dataStore_->WriteChunk(2, // chunk id fileSn, buf2_2, offset, length, nullptr); - // 删除chunk2快照 + // Delete chunk2 snapshot e_delsnap_2_2 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后删除chunk2快照 + // Simulate taking another snapshot and then delete the chunk2 snapshot ++fileSn; e_delsnap_2_3 = dataStore_->DeleteSnapshotChunkOrCorrectSn(2, fileSn); - // 模拟再次快照,然后写数据到chunk2产生快照 + // Simulate another snapshot, then write data to chunk2 to generate a snapshot ++fileSn; offset = 2 * PAGE_SIZE; length = 2 * PAGE_SIZE; - // 写chunk2,产生快照文件 + // Write chunk2 to generate a snapshot file e_write_2_4 = dataStore_->WriteChunk(2, // chunk 
id fileSn, buf2_4, @@ -276,23 +276,23 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { length, nullptr); - // 模拟克隆操作 + // Simulate Clone Operations e_clone_3_1 = dataStore_->CreateCloneChunk(3, // chunk id 1, // sn 0, // corrected sn CHUNK_SIZE, location); - // 写数据到chunk3 + // Write data to chunk3 offset = 0; length = 2 * PAGE_SIZE; - // 写chunk3 + // Write chunk3 e_write_3_1 = dataStore_->WriteChunk(3, // chunk id 1, // sn writeBuf, offset, length, nullptr); - // paste数据到chunk3 + // Paste data to chunk3 offset = 1 * PAGE_SIZE; length = 2 * PAGE_SIZE; e_paste_3_1 = dataStore_->PasteChunk(3, // chunk id @@ -301,35 +301,35 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { length); }; - // 检查上面用户操作以后,DataStore层各文件的状态,可重用 + // After checking the user actions above, the status of each file in the DataStore layer can be reused auto CheckStatus = [&]() { CSErrorCode errorCode; - // chunk1 不存在 + // Chunk1 does not exist errorCode = dataStore_->GetChunkInfo(1, &info1); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // chunk2存在,版本为4,correctedSn为3,存在快照,快照版本为2 + // Chunk2 exists, version 4, correctedSn is 3, snapshot exists, snapshot version 2 errorCode = dataStore_->GetChunkInfo(2, &info2); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(4, info2.curSn); ASSERT_EQ(2, info2.snapSn); ASSERT_EQ(3, info2.correctedSn); - // 检查chunk2数据,[0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c + // Check chunk2 data, [0, 1KB]:a , [1KB, 2KB]:b , [2KB, 4KB]:c errorCode = dataStore_->ReadChunk(2, fileSn, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_4, readBuf + 2 * PAGE_SIZE, 2 * PAGE_SIZE)); - // 检查chunk2快照数据,[0, 1KB]:a , [1KB, 3KB]:b + // Check chunk2 snapshot data, [0, 1KB]:a , [1KB, 3KB]:b errorCode = dataStore_->ReadSnapshotChunk(2, 2, readBuf, 0, readSize); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_1, readBuf, 1 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf2_2, readBuf + 1 * PAGE_SIZE, 2 * PAGE_SIZE)); }; - /******************构造重启前的数据******************/ - // 提交操作 + /******************Generate data before reboot******************/ + // Submit Action ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check if the return value of each operation meets expectations ASSERT_EQ(e_write_1_1, CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::Success); ASSERT_EQ(e_del_1_1, CSErrorCode::Success); @@ -340,27 +340,27 @@ TEST_F(DatastoreIntegrationTest, RestartTest) { ASSERT_EQ(e_clone_3_1, CSErrorCode::Success); ASSERT_EQ(e_write_3_1, CSErrorCode::Success); ASSERT_EQ(e_paste_3_1, CSErrorCode::Success); - // 检查此时各个文件的状态 + // Check the status of each file at this time CheckStatus(); - /******************场景一:重启重新加载文件******************/ - // 模拟重启 + /******************Scene 1: Reboot and Reload Files******************/ + // Simulate restart DataStoreOptions options; options.baseDir = baseDir; options.chunkSize = CHUNK_SIZE; options.pageSize = PAGE_SIZE; - // 构造新的dataStore_,并重新初始化 + // Construct a new dataStore_, And reinitialize dataStore_ = std::make_shared(lfs_, filePool_, options); ASSERT_TRUE(dataStore_->Initialize()); - // 检查各个chunk的状态,应该与前面的一致 + // Check the status of each chunk, which should be consistent with the previous one CheckStatus(); - /******************场景二:恢复日志,重放之前的操作******************/ - // 模拟日志回放 + /******************Scene 2: Restore logs, replay previous actions******************/ + // Simulate log playback 
ApplyRequests(); - // 检查每次操作的返回值是否符合预期 + // Check if the return value of each operation meets expectations ASSERT_EQ(e_write_1_1, CSErrorCode::Success); ASSERT_EQ(e_write_2_1, CSErrorCode::BackwardRequestError); ASSERT_EQ(e_del_1_1, CSErrorCode::Success); diff --git a/test/integration/chunkserver/datastore/datastore_restart_test.cpp b/test/integration/chunkserver/datastore/datastore_restart_test.cpp index f7a9d9ae5a..dd59267d3e 100644 --- a/test/integration/chunkserver/datastore/datastore_restart_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_restart_test.cpp @@ -30,7 +30,7 @@ namespace chunkserver { const string baseDir = "./data_int_res"; // NOLINT const string poolDir = "./chunfilepool_int_res"; // NOLINT const string poolMetaPath = "./chunfilepool_int_res.meta"; // NOLINT -// 以下的测试读写数据都在[0, 32kb]范围内 +// The following test read and write data are within the range of [0, 32kb] const uint64_t kMaxSize = 8 * PAGE_SIZE; struct RangeData { @@ -118,7 +118,7 @@ class ExecStep { kMaxSize); } statusAfterExec_->chunkData = chunkData; - // 快照存在,读取快照数据 + // Snapshot exists, reading snapshot data if (info.snapSn > 0) { char* snapData = new char[kMaxSize]; (*datastore_)->ReadSnapshotChunk( @@ -279,31 +279,31 @@ class StepList { void ClearEnv() { clearFunc_(); - // 清理每一步的预期状态,因为清理环境后,读取到的数据内容可能会不一样 - // 因为通过FilePool分配的chunk初始内容是不确定的 + // Clean up the expected state of each step, as the data content read after cleaning up the environment may differ + // Because the initial content of the chunk allocated through FilePool is uncertain for (auto &step : steps) { step->ClearStatus(); } } - // 重启前,用户最后执行的操作可能为任意步骤, - // 需要验证每个步骤作为最后执行操作时,日志从该步骤前任意步骤进行恢复的幂等性 - // 对于未执行的步骤可以不必验证,只要保证已执行步骤的恢复是幂等的 - // 未执行的步骤恢复一定是幂等的 + // Before restarting, the last action performed by the user may be any step, + // It is necessary to verify the idempotence of the log recovery from any step before each step as the final execution operation + // For steps that have not been executed, there is no need to verify as long as the recovery of the executed steps is idempotent + // Unexecuted step recovery must be idempotent bool VerifyLogReplay() { - // 验证每个步骤作为最后执行操作时日志恢复的幂等性 + // Verify the idempotence of log recovery at each step as the final operation for (int lastStep = 0; lastStep < steps.size(); ++lastStep) { - // 重新初始化环境 + // Reinitialize the environment ClearEnv(); printf("==============Verify log replay to step%d==============\n", lastStep + 1); - // 构造重启前环境 + // Construct a pre restart environment if (!ConstructEnv(lastStep)) { LOG(ERROR) << "Construct env failed."; Dump(); return false; } - // 验证日志恢复后的幂等性 + // Verify the idempotence of log recovery if (!ReplayLog(lastStep)) { LOG(ERROR) << "Replay log failed." << "last step: step" << lastStep + 1; @@ -322,15 +322,15 @@ class StepList { } private: - // 构造初始状态 + // Construction initial state bool ConstructEnv(int lastStep) { - // 模拟日志恢复前执行,用于构造初始Chunk状态,并初始化每一步的预期状态 + // Execute before simulating log recovery to construct the initial Chunk state and initialize the expected state for each step for (int curStep = 0; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); step->SetExpectStatus(); } - // 检查构造出来的状态是否符合预期 + // Check if the constructed state meets expectations if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." 
<< "last step: step" << lastStep + 1; @@ -339,16 +339,16 @@ class StepList { return true; } - // 从最后步骤前任意一个步骤进行恢复都应该保证幂等性 + // Restoring from any step before the final step should ensure idempotence bool ReplayLog(int lastStep) { - // 模拟从不同的起始位置进行日志恢复 + // Simulate log recovery from different starting locations for (int beginStep = 0; beginStep <= lastStep; ++beginStep) { - // 执行恢复前,chunk的状态保证为预期的状态 + // Before performing the recovery, the state of the chunk is guaranteed to be the expected state for (int curStep = beginStep; curStep <= lastStep; ++curStep) { std::shared_ptr step = steps[curStep]; step->Exec(); } - // 每次日志恢复完成检查Chunk状态是否符合预期 + // Check if the Chunk status meets expectations after each log recovery is completed if (!CheckStatus(lastStep)) { LOG(ERROR) << "Check chunk status failed." << "begin step: step" << beginStep + 1 @@ -448,16 +448,16 @@ class StepList { std::shared_ptr step = steps[lastStep]; std::shared_ptr expectStatus = step->GetStatus(); - // 获取chunk信息 + // Obtain chunk information std::shared_ptr datastore = step->GetDataStore(); ChunkID id = step->GetChunkID(); CSChunkInfo info; CSErrorCode err = datastore->GetChunkInfo(id, &info); - // 返回Success说明chunk存在 + // Returning Success indicates that the chunk exists if (err == CSErrorCode::Success) { - // 检查chunk的状态 + // Check the status of the chunk if (!expectStatus->exist || expectStatus->chunkInfo != info) { LOG(ERROR) << "Chunk info is not as expected!"; @@ -479,18 +479,18 @@ class StepList { return false; } - // 检查chunk的数据状态 + // Check the data status of the chunk if (!CheckChunkData(step)) return false; - // 检查快照状态 + // Check snapshot status if (info.snapSn > 0) { - // 检查快照的数据状态 + // Check the data status of the snapshot if (!CheckSnapData(step)) return false; } } else if (err == CSErrorCode::ChunkNotExistError) { - // 预期chunk存在,实际却不存在 + // The expected chunk exists, but it does not actually exist if (expectStatus->exist) { LOG(ERROR) << "Chunk is expected to exist, but actual not."; return false; @@ -529,7 +529,7 @@ TEST_F(RestartTestSuit, BasicTest) { ChunkID id = 1; SequenceNum sn = 1; - // 第一步:WriteChunk,写[0, 8kb]区域 + // Step 1: WriteChunk, write the [0, 8kb] area RangeData step1Data; step1Data.offset = 0; step1Data.length = 2 * PAGE_SIZE; @@ -538,7 +538,7 @@ TEST_F(RestartTestSuit, BasicTest) { std::make_shared(&dataStore_, id, sn, step1Data); list.Add(step1); - // 第二步:WriteChunk,写[4kb, 12kb]区域 + // Step 2: WriteChunk, write the [4kb, 12kb] area RangeData step2Data; step2Data.offset = PAGE_SIZE; step2Data.length = 2 * PAGE_SIZE; @@ -547,7 +547,7 @@ TEST_F(RestartTestSuit, BasicTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:DeleteChunk + // Step 3: DeleteChunk std::shared_ptr step3 = std::make_shared(&dataStore_, id, sn); list.Add(step3); @@ -561,7 +561,7 @@ TEST_F(RestartTestSuit, SnapshotTest) { ChunkID id = 1; SequenceNum sn = 1; - // 第一步:WriteChunk,写[0, 8kb]区域 + // Step 1: WriteChunk, write the [0, 8kb] area RangeData step1Data; step1Data.offset = 0; step1Data.length = 2 * PAGE_SIZE; @@ -570,10 +570,10 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step1Data); list.Add(step1); - // 模拟用户打了快照,此时sn +1 + // Simulated user took a snapshot, at which point sn+1 ++sn; - // 第二步:WriteChunk,写[4kb, 12kb]区域 + // Step 2: WriteChunk, write the [4kb, 12kb] area RangeData step2Data; step2Data.offset = PAGE_SIZE; step2Data.length = 2 * PAGE_SIZE; @@ -582,20 +582,20 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, 
step2Data); list.Add(step2); - // 第三步:用户请求删除快照 + // Step 3: User requests to delete the snapshot std::shared_ptr step3 = std::make_shared(&dataStore_, id, sn); list.Add(step3); - // 模拟再次打快照 sn +1 + // Simulate taking a snapshot again sn+1 ++sn; - // 第四步:此次快照过程中没有数据写入,直接DeleteSnapshotOrCorrectedSn + // Step 4: No data was written during this snapshot process, directly delete SnapshotOrCorrectedSn std::shared_ptr step4 = std::make_shared(&dataStore_, id, sn); list.Add(step4); - // 第五步:WriteChunk,写[8kb, 16kb]区域 + // Step 5: WriteChunk, write the [8kb, 16kb] area RangeData step5Data; step5Data.offset = 2 * PAGE_SIZE; step5Data.length = 2 * PAGE_SIZE; @@ -604,10 +604,10 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step5Data); list.Add(step5); - // 模拟再次打快照 sn +1 + // Simulate taking a snapshot again sn+1 ++sn; - // 第六步:WriteChunk,写[4kb, 12kb]区域 + // Step 6: WriteChunk, write the [4kb, 12kb] area RangeData step6Data; step6Data.offset = PAGE_SIZE; step6Data.length = 2 * PAGE_SIZE; @@ -616,20 +616,20 @@ TEST_F(RestartTestSuit, SnapshotTest) { std::make_shared(&dataStore_, id, sn, step6Data); list.Add(step6); - // 第七步:用户请求删除快照 + // Step 7: User requests to delete the snapshot std::shared_ptr step7 = std::make_shared(&dataStore_, id, sn); list.Add(step7); - // 模拟再次打快照 sn +1 + // Simulate taking a snapshot again sn+1 ++sn; - // 第八步:用户请求删除快照 + // Step 8: User requests to delete the snapshot std::shared_ptr step8 = std::make_shared(&dataStore_, id, sn); list.Add(step8); - // 第九步:用户请求删除chunk + // Step 9: User requests to delete chunk std::shared_ptr step9 = std::make_shared(&dataStore_, id, sn); list.Add(step9); @@ -637,7 +637,7 @@ TEST_F(RestartTestSuit, SnapshotTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 测试克隆场景,以及克隆后打快照的组合场景 +// Test the cloning scenario and the combination scenario of taking a snapshot after cloning TEST_F(RestartTestSuit, CloneTest) { StepList list(clearFunc); @@ -646,7 +646,7 @@ TEST_F(RestartTestSuit, CloneTest) { SequenceNum correctedSn = 0; std::string location("test@s3"); - // 第一步:通过CreateCloneChunk创建clone chunk + // Step 1: Create a clone chunk through CreateCloneChunk std::shared_ptr step1 = std::make_shared(&dataStore_, id, @@ -656,7 +656,7 @@ TEST_F(RestartTestSuit, CloneTest) { location); list.Add(step1); - // 第二步:WriteChunk,写[0kb, 8kb]区域 + // Step 2: WriteChunk, write the [0kb, 8kb] area RangeData step2Data; step2Data.offset = 0; step2Data.length = 2 * PAGE_SIZE; @@ -665,7 +665,7 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, sn, step2Data); list.Add(step2); - // 第三步:PasteChunk,写[4kb, 12kb]区域 + // Step 3: PasteChunk, write the [4kb, 12kb] area RangeData step3Data; step3Data.offset = PAGE_SIZE; step3Data.length = 2 * PAGE_SIZE; @@ -674,7 +674,7 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, step3Data); list.Add(step3); - // 第四步:通过PasteChunk 遍写chunk + // Step 4: Write the chunk through PasteChunk RangeData step4Data; step4Data.offset = 0; step4Data.length = CHUNK_SIZE; @@ -683,10 +683,10 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, step4Data); list.Add(step4); - // 模拟打快照 + // Simulate taking a snapshot ++sn; - // 第五步:WriteChunk,写[4kb, 12kb]区域 + // Step 5: WriteChunk, write the [4kb, 12kb] area RangeData step5Data; step5Data.offset = PAGE_SIZE; step5Data.length = 2 * PAGE_SIZE; @@ -695,12 +695,12 @@ TEST_F(RestartTestSuit, CloneTest) { std::make_shared(&dataStore_, id, sn, step5Data); list.Add(step5); - // 第六步:用户请求删除快照 + // Step 6: User requests to 
delete the snapshot std::shared_ptr step6 = std::make_shared(&dataStore_, id, sn); list.Add(step6); - // 第七步:DeleteChunk + // Step 7: DeleteChunk std::shared_ptr step7 = std::make_shared(&dataStore_, id, sn); list.Add(step7); @@ -708,7 +708,7 @@ TEST_F(RestartTestSuit, CloneTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 测试恢复场景 +// Testing Recovery Scenarios TEST_F(RestartTestSuit, RecoverTest) { StepList list(clearFunc); @@ -717,7 +717,7 @@ TEST_F(RestartTestSuit, RecoverTest) { SequenceNum correctedSn = 5; std::string location("test@s3"); - // 第一步:通过CreateCloneChunk创建clone chunk + // Step 1: Create a clone chunk through CreateCloneChunk std::shared_ptr step1 = std::make_shared(&dataStore_, id, @@ -727,10 +727,10 @@ TEST_F(RestartTestSuit, RecoverTest) { location); list.Add(step1); - // 数据写入的版本应为最新的版本 + // The version of data writing should be the latest version sn = correctedSn; - // 第二步:PasteChunk,写[0kb, 8kb]区域 + // Step 2: PasteChunk, write the [0kb, 8kb] area RangeData step2Data; step2Data.offset = 0; step2Data.length = 2 * PAGE_SIZE; @@ -739,7 +739,7 @@ TEST_F(RestartTestSuit, RecoverTest) { std::make_shared(&dataStore_, id, step2Data); list.Add(step2); - // 第三步:PasteChunk,写[4kb, 12kb]区域 + // Step 3: PasteChunk, write the [4kb, 12kb] area RangeData step3Data; step3Data.offset = PAGE_SIZE; step3Data.length = 2 * PAGE_SIZE; @@ -748,7 +748,7 @@ TEST_F(RestartTestSuit, RecoverTest) { std::make_shared(&dataStore_, id, sn, step3Data); list.Add(step3); - // 第四步:通过PasteChunk 遍写chunk + // Step 4: Write the chunk through PasteChunk RangeData step4Data; step4Data.offset = 0; step4Data.length = CHUNK_SIZE; @@ -757,7 +757,7 @@ TEST_F(RestartTestSuit, RecoverTest) { std::make_shared(&dataStore_, id, sn, step4Data); list.Add(step4); - // 第五步:DeleteChunk + // Step 5: DeleteChunk std::shared_ptr step5 = std::make_shared(&dataStore_, id, sn); list.Add(step5); @@ -765,7 +765,7 @@ TEST_F(RestartTestSuit, RecoverTest) { ASSERT_TRUE(list.VerifyLogReplay()); } -// 按照实际用户使用从场景随机产生每一步的操作,校验一定操作个数下都能保证幂等性 +// Randomly generate each step of the operation from the scene based on actual user usage, and verify that a certain number of operations can ensure idempotence TEST_F(RestartTestSuit, RandomCombine) { StepList list(clearFunc); @@ -775,7 +775,7 @@ TEST_F(RestartTestSuit, RandomCombine) { std::string location("test@s3"); std::srand(std::time(nullptr)); - // 写随机地址的数据,在[0, kMaxSize]范围内写 + // Write random address data within the range of [0, kMaxSize] auto randWriteOrPaste = [&](bool isPaste) { int pageCount = kMaxSize / PAGE_SIZE; RangeData stepData; @@ -793,9 +793,9 @@ TEST_F(RestartTestSuit, RandomCombine) { } }; - // 随机的克隆过程 + // Random cloning process auto randClone = [&]() { - // 二分之一概率,模拟恢复过程 + //Half probability, simulating the recovery process if (std::rand() % 2 == 0) correctedSn = 2; std::shared_ptr createStep = @@ -807,7 +807,7 @@ TEST_F(RestartTestSuit, RandomCombine) { location); list.Add(createStep); - // 克隆过程模拟5个操作,Write或者Paste,三分之一概率Write + // The cloning process simulates 5 operations, Write or Paste, with a one-third probability of Write for (int i = 0; i < 5; ++i) { if (std::rand() % 3 == 0) { randWriteOrPaste(false); @@ -816,7 +816,7 @@ TEST_F(RestartTestSuit, RandomCombine) { } } - // 遍写一遍chunk,可以用于模拟后续写入创建快照 + // Write the chunk over and over again, which can be used to simulate subsequent writes and create snapshots RangeData pasteData; pasteData.offset = 0; pasteData.length = CHUNK_SIZE; @@ -826,11 +826,11 @@ TEST_F(RestartTestSuit, RandomCombine) { list.Add(pasteStep); }; - // 
随机的快照过程 + // Random snapshot process auto randSnapshot = [&](int* stepCount) { - // 快照需要将版本+1 + // Snapshots require version+1 ++sn; - // 三分之一的概率调DeleteSnapshot,一旦调了DeleteSnapshot就退出快照 + // One third of the probability is to call DeleteSnapshot, and once DeleteSnapshot is called, it exits the snapshot while (true) { if (std::rand() % 3 == 0) { std::shared_ptr step = @@ -844,14 +844,14 @@ TEST_F(RestartTestSuit, RandomCombine) { } }; - // 创建clone chunk, + // Create a clone chunk randClone(); - // 设置最长执行步数 + // Set the maximum number of execution steps int maxSteps = 30; int stepCount = 0; while (stepCount < maxSteps) { - // 三分之一的概率会模拟快照过程 + // One-third of the probability will simulate the snapshot process if (std::rand() % 3 == 0) { randSnapshot(&stepCount); } else { @@ -860,7 +860,7 @@ TEST_F(RestartTestSuit, RandomCombine) { } } - // 最后删除chunk + // Finally, delete the chunk std::shared_ptr lastStep = std::make_shared(&dataStore_, id, sn); list.Add(lastStep); diff --git a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp index 61dc402c21..3b13655b9f 100644 --- a/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_snapshot_case_test.cpp @@ -36,14 +36,14 @@ class SnapshotTestSuit : public DatastoreIntegrationBase { }; /** - * 快照场景测试 - * 构造存在两个chunk的文件,分别为chunk1和chunk2,做如下操作 - * 1.写chunk1 - * 2.模拟第一次打快照,转储过程中写chunk1并产生快照,chunk2未发生数据写入 - * 3.删除快照,然后向chunk2中写入数据 - * 4.模拟第二次打快照,转储过程中写chunk1,但是不写chunk2 - * 5.删除快照,再次向chunk2写入数据 - * 6.删除文件 + * Snapshot scenario testing + * Construct a file with two chunks, chunk1 and chunk2, as follows + * 1. Write chunk1 + * 2. Simulate the first snapshot taken, write chunk1 during the dump process and generate a snapshot, but chunk2 does not have data write + * 3. Delete the snapshot and write data to chunk2 + * 4. Simulate taking a second snapshot, writing chunk1 during the dump process, but not chunk2 + * 5. Delete the snapshot and write data to chunk2 again + * 6. 
Delete the file */ TEST_F(SnapshotTestSuit, SnapshotTest) { SequenceNum fileSn = 1; @@ -55,9 +55,9 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { CSChunkInfo chunk1Info; CSChunkInfo chunk2Info; - /******************构造初始环境,创建chunk1******************/ + /****************** Construct the initial environment: create chunk1 ******************/ - // 向chunk1的[0, 12KB)区域写入数据 "1" + // Write data '1' to the [0, 12KB) area of chunk1 offset = 0; length = 3 * PAGE_SIZE; // 12KB char buf1_1[3 * PAGE_SIZE]; @@ -70,12 +70,12 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - /******************场景一:第一次给文件打快照******************/ + /******************Scene 1: Take the first snapshot of the file******************/ - // 模拟打快照,此时文件版本递增 + // Simulate taking a snapshot; the file version increases ++fileSn; // fileSn == 2 - // 向chunk1的[4KB, 8KB)区域写入数据 “2” + // Write data '2' to the [4KB, 8KB) area of chunk1 offset = 1 * PAGE_SIZE; length = 1 * PAGE_SIZE; char buf1_2[3 * PAGE_SIZE]; @@ -87,7 +87,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // Information on chunk1 can be obtained, and all information meets expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); @@ -96,7 +96,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { size_t readSize = 3 * PAGE_SIZE; char readbuf[3 * PAGE_SIZE]; - // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该都是‘1’ + // Read the [0, 12KB) area of the chunk1 snapshot file; the data read should all be '1' errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id 1, // snap sn readbuf, @@ -105,7 +105,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize)); - // 重复写入,验证不会重复cow,读快照时[4KB, 8KB)区域的数据应为“1” + // Repeat the write to verify that no duplicate COW occurs; when reading the snapshot, the data in the [4KB, 8KB) area should be '1' errorCode = dataStore_->WriteChunk(id1, // id fileSn, buf1_2, @@ -114,7 +114,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 写未cow过的区域,写入[0,4kb]区域 + // Write an area that has not been COWed yet: write the [0, 4kb] area offset = 0; length = PAGE_SIZE; errorCode = dataStore_->WriteChunk(id1, // id @@ -125,7 +125,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 写部分cow过的区域,写入[4kb,12kb]区域 + // Write an area that has been partially COWed: write the [4kb, 12kb] area offset = PAGE_SIZE; length = 2 * PAGE_SIZE; errorCode = dataStore_->WriteChunk(id1, // id @@ -136,16 +136,16 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // Information on chunk1 can be obtained, and all information meets expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, chunk1Info.curSn); ASSERT_EQ(1, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 此时读chunk1返回数据内容应该为[0,12KB]:2 - // 读chunk1快照返回的数据内容应该为[0, 12KB):1 - // 其余地址空间的数据可以不用保证 + // At this point, reading chunk1 should return [0, 12KB]:2 + // Reading the chunk1 snapshot should return [0, 12KB):1 + // Data in the remaining address space need not be guaranteed readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); 
errorCode = dataStore_->ReadChunk(id1, // chunk id @@ -156,7 +156,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize)); - // 读chunk1快照文件的[0, 12KB)区域,读出来数据应该还是‘1’ + // When reading the [0, 12KB) area of the chunk1 snapshot file, the read data should still be '1' readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id @@ -167,7 +167,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_1, readbuf, readSize)); - // ReadSnapshotChun,请求offset+length > page size + // ReadSnapshotChun, request offset+length > page size offset = CHUNK_SIZE - PAGE_SIZE; readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); @@ -178,7 +178,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { readSize); ASSERT_EQ(errorCode, CSErrorCode::InvalidArgError); - // 读chunk2快照文件,返回ChunkNotExistError + // Read chunk2 snapshot file and return ChunkNotExistError readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id2, // chunk id @@ -188,23 +188,23 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { readSize); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - /******************场景二:第一次快照结束,删除快照******************/ + /******************Scene 2: First snapshot completes, delete snapshot******************/ - // 请求删chunk1的快照,返回成功,并删除快照 + // Request to delete the snapshot of chunk1, return success, and delete the snapshot errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk1信息,符合预期 + // Check chunk1 information, as expected errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); ASSERT_EQ(0, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 请求删chunk2的快照,返回成功 + // Request to delete the snapshot of chunk2, returned success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 向chunk2的[0, 8KB)区域写入数据 "a" + // Write data 'a' to the [0, 8KB) area of chunk2 offset = 0; length = 2 * PAGE_SIZE; // 8KB char buf2_2[2 * PAGE_SIZE]; @@ -216,19 +216,19 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk1信息,符合预期 + // Check chunk1 information, as expected errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(0, chunk2Info.correctedSn); - /******************场景三:第二次打快照******************/ + /******************Scene 3: Take second snapshot******************/ - // 模拟第二次打快照,版本递增 + // Simulate taking a second snapshot and increasing the version ++fileSn; // fileSn == 3 - // 向chunk1的[0KB, 8KB)区域写入数据 “3” + // Write data '3' to the [0KB, 8KB) area of chunk1 offset = 0; length = 2 * PAGE_SIZE; char buf1_3[2 * PAGE_SIZE]; @@ -240,16 +240,16 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { length, nullptr); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 可以获取到chunk1的信息,且各项信息符合预期 + // Information on chunk1 can be obtained, and all information meets expectations errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); ASSERT_EQ(2, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 
此时读chunk1返回数据内容应该为[0,8KB]:3,[8KB, 12KB]:2 - // 读chunk1快照返回的数据内容应该为[0, 12KB]:2 - // 其余地址空间的数据可以不用保证 + // At this point, the data content returned by reading chunk1 should be [0,8KB]:3, [8KB, 12KB]:2 + // The data content returned from reading chunk1 snapshot should be [0, 12KB]:2 + // The data in other address spaces can be guaranteed without any need readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadChunk(id1, // chunk id @@ -261,7 +261,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { ASSERT_EQ(0, memcmp(buf1_3, readbuf, 2 * PAGE_SIZE)); ASSERT_EQ(0, memcmp(buf1_2, readbuf + 2 * PAGE_SIZE, 1 * PAGE_SIZE)); - // 读chunk1快照文件的[0, 12KB)区域,数据内容为‘2’ + // Read the [0, 12KB) area of the chunk1 snapshot file, with data content of '2' readSize = 3 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id1, // chunk id @@ -272,7 +272,7 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf1_2, readbuf, readSize)); - // 读chunk2快照返回的数据内容应该为[0, 8KB):a,其余地址空间的数据可以不用保证 + // The data content returned by reading the chunk2 snapshot should be [0, 8KB): a, and the data in the other address spaces can be guaranteed without any need to readSize = 2 * PAGE_SIZE; memset(readbuf, 0, sizeof(readbuf)); errorCode = dataStore_->ReadSnapshotChunk(id2, // chunk id @@ -283,29 +283,29 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(0, memcmp(buf2_2, readbuf, readSize)); - /******************场景四:第二次快照结束,删除快照******************/ + /******************Scene 4: Second snapshot completes, delete snapshot******************/ - // 请求删chunk1的快照,返回成功 + // Request to delete snapshot of chunk1, returned success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk1信息,符合预期 + // Check chunk1 information, as expected errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk1Info.curSn); ASSERT_EQ(0, chunk1Info.snapSn); ASSERT_EQ(0, chunk1Info.correctedSn); - // 请求删chunk2的快照,返回成功 + // Request to delete the snapshot of chunk2, returned success errorCode = dataStore_->DeleteSnapshotChunkOrCorrectSn(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); - // 检查chunk2信息,符合预期 + // Check chunk2 information, as expected errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(2, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - // 向chunk2的[0KB, 4KB)区域写入数据 “b” + // Write data 'b' to the [0KB, 4KB) area of chunk2 offset = 0; length = 1 * PAGE_SIZE; char buf2_3[1 * PAGE_SIZE]; @@ -316,36 +316,36 @@ TEST_F(SnapshotTestSuit, SnapshotTest) { offset, length, nullptr); - // 检查chunk2信息,符合预期,curSn变为3,不会产生快照 + // Check chunk2 information, as expected, curSn becomes 3, no snapshot will be generated errorCode = dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - // 再次向chunk2的[0KB, 8KB)区域写入数据 + // Write data to the [0KB, 8KB) area of chunk2 again errorCode = dataStore_->WriteChunk(id2, // id fileSn, buf2_3, offset, length, nullptr); - // 检查chunk2信息,chunk信息不变,不会产生快照 + // Check chunk2 information, chunk information remains unchanged and no snapshot will be generated errorCode = 
dataStore_->GetChunkInfo(id2, &chunk2Info); ASSERT_EQ(errorCode, CSErrorCode::Success); ASSERT_EQ(fileSn, chunk2Info.curSn); ASSERT_EQ(0, chunk2Info.snapSn); ASSERT_EQ(fileSn, chunk2Info.correctedSn); - /******************场景五:用户删除文件******************/ + /******************Scene 5: User Deletes File******************/ - // 此时删除Chunk1,返回Success + // At this point, delete Chunk1 and return to Success errorCode = dataStore_->DeleteChunk(id1, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(id1, &chunk1Info); ASSERT_EQ(errorCode, CSErrorCode::ChunkNotExistError); - // 此时删除Chunk2,返回Success + // At this point, delete Chunk2 and return to Success errorCode = dataStore_->DeleteChunk(id2, fileSn); ASSERT_EQ(errorCode, CSErrorCode::Success); errorCode = dataStore_->GetChunkInfo(id2, &chunk1Info); diff --git a/test/integration/chunkserver/datastore/datastore_stress_test.cpp b/test/integration/chunkserver/datastore/datastore_stress_test.cpp index 2364d61dd2..63b2c7d8c5 100644 --- a/test/integration/chunkserver/datastore/datastore_stress_test.cpp +++ b/test/integration/chunkserver/datastore/datastore_stress_test.cpp @@ -92,27 +92,27 @@ TEST_F(StressTestSuit, StressTest) { printf("===============TEST WRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 0, 10000); - // 10个线程 + // 10 threads RunStress(10, 0, 50000); - // 50个线程 + // 50 threads RunStress(50, 0, 100000); printf("===============TEST READ==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 100, 10000); - // 10个线程 + // 10 threads RunStress(10, 100, 50000); - // 50个线程 + // 50 threads RunStress(50, 100, 100000); printf("===============TEST READWRITE==================\n"); - // 测试单线程性能 + // Testing Single Thread Performance RunStress(1, 50, 10000); - // 10个线程 + // 10 threads RunStress(10, 50, 50000); - // 50个线程 + // 50 threads RunStress(50, 50, 100000); } diff --git a/test/integration/client/chunkserver_exception_test.cpp b/test/integration/client/chunkserver_exception_test.cpp index dca71bdaf3..92643040fb 100644 --- a/test/integration/client/chunkserver_exception_test.cpp +++ b/test/integration/client/chunkserver_exception_test.cpp @@ -143,7 +143,7 @@ class CSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22233", "127.0.0.1:22234", std::vector{ @@ -151,7 +151,7 @@ class CSModuleException : public ::testing::Test { LOG(INFO) << "etcd 1 started on 127.0.0.1:22233:22234, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other two mds nodes pid = cluster->StartSingleMDS(1, "127.0.0.1:22122", 22128, mdsConf, true); LOG(INFO) << "mds 1 started on 127.0.0.1:22122, pid = " << pid; @@ -168,7 +168,7 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -190,7 +190,7 @@ class CSModuleException : public ::testing::Test { retry++; } - // 4. 创建chunkserver + // 4. 
Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; @@ -207,7 +207,7 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool, and sleep for a while so that the underlying copysets can elect leaders first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -228,15 +228,15 @@ class CSModuleException : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. Create a file fd = curve::test::FileCommonOperation::Open("/test1", "curve"); ASSERT_NE(fd, -1); - // 8. 先睡眠10s,让chunkserver选出leader + // 8. Sleep for 10s first to let the chunkservers elect a leader std::this_thread::sleep_for(std::chrono::seconds(10)); } @@ -282,12 +282,12 @@ class CSModuleException : public ::testing::Test { } /** - * 监测client io能否在预期时间内正常下发 - * @param: off是当前需要下发IO的偏移 - * @param: size是下发io的大小 - * @param: predictTimeS是预期在多少秒内IO可以恢复 - * @param[out]: failCount为当前io下发中错误返回的数量 - * @return: 如果io在预期时间内能够正常下发,则返true,否则返回false + * Monitor whether client IO can be issued normally within the expected time + * @param: off is the offset of the IO to be issued + * @param: size is the size of the IO to be issued + * @param: predictTimeS is the number of seconds within which IO is expected to recover + * @param[out]: failCount is the number of errors returned while issuing IO + * @return: returns true if IO can be issued normally within the expected time, false otherwise */ bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS, uint64_t* failCount = nullptr) { @@ -335,7 +335,7 @@ class CSModuleException : public ::testing::Test { failCount == nullptr ? 0 : (*failCount = ioFailedCount); - // 唤醒io线程 + // Wake up the IO thread iothread.join(); inflightContl.WaitInflightAllComeBack(); @@ -345,7 +345,7 @@ class CSModuleException : public ::testing::Test { int fd; - // 是否出现挂卸载失败 + // Whether a mount or unmount failure occurred bool createOrOpenFailed; bool createDone; std::mutex createMtx; @@ -354,173 +354,173 @@ class CSModuleException : public ::testing::Test { CurveCluster* cluster; }; -// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd +// Test environment topology: start one client, three chunkservers, three mds, and one etcd on a single node TEST_F(CSModuleException, ChunkserverException) { LOG(INFO) << "current case: KillOneChunkserverThenRestartTheChunkserver"; /********* KillOneChunkserverThenRestartTheChunkserver **********/ - // 1. 测试重启一个chunkserver - // 2.预期: - // a. 集群状态正常时:client读写请求可以正常下发 - // b. kill一台chunkserver:client 读写请求最多卡顿 - // election_timeout*2s可以正常读写 - // c. 恢复chunkserver:client 读写请求无影响 - // 1. 集群最初状态,io正常下发 + // 1. Test restarting one chunkserver + // 2. Expected: + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Kill one chunkserver: client read and write requests stall for at most + // election_timeout*2s before reads and writes resume + // c. Recover the chunkserver: client read and write requests are not affected + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill掉一个chunkserver + // 2. 
Kill one chunkserver ASSERT_EQ(0, cluster->StopChunkServer(1)); - // 3. kill掉一个chunkserver之后,client的io预期最多会在2*electtime后恢复 + // 3. After killing one chunkserver, client IO is expected to recover within at most 2 * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被kill的chunkserver + // 4. Pull up the chunkserver that was just killed pid_t pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; ASSERT_GT(pid, 0); - // 5. 重新拉起对client IO没有影响 + // 5. Pulling it up again has no impact on client IO ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: HangOneChunkserverThenResumeTheChunkserver"; /********* HangOneChunkserverThenResumeTheChunkserver ***********/ - // 1. hang一台chunkserver,然后恢复hang的chunkserver - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang一台chunkserver:client - // 读写请求最多卡顿election_timeout*2s可以正常读写 - // c. 恢复chunkserver:client 读写请求无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang one chunkserver, then recover the hung chunkserver + // 2. Expectations + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Hang one chunkserver: client + // read and write requests stall for at most election_timeout*2s before reads and writes resume + // c. Recover the chunkserver: client read and write requests are not affected + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang一个chunkserver + // 2. Hang one chunkserver ASSERT_EQ(0, cluster->HangChunkServer(1)); - // 3. hang一个chunkserver之后,client的io预期最多会在2*electtime后恢复 + // 3. After hanging one chunkserver, client IO is expected to recover within at most 2 * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被hang的chunkserver + // 4. Pull up the chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 重新拉起对client IO没有影响 + // 5. Pulling it up again has no impact on client IO ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillTwoChunkserverThenRestartTheChunkserver"; /******** KillTwoChunkserverThenRestartTheChunkserver *********/ - // 1. 测试重启两个chunkserver - // 2.预期: - // a. 集群状态正常时:client读写请求可以正常下发 - // b. kill两台chunkserver:预期client IO持续hang,新写IO和覆盖写都hang - // 拉起被kill中的一台chunkserver:client IO预期在最多在 - // (chunkserver启动回放数据+2*election_timeout)时间内恢复读写 - // c. 拉起另外一台kill的chunkserver:client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Test restarting two chunkservers + // 2. Expected: + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Kill two chunkservers: client IO is expected to keep hanging, both new writes and overwrites hang + // Pull up one of the killed chunkservers: client IO is expected to + // resume reads and writes within at most (time for the chunkserver to replay its data + 2 * election_timeout) + // c. Pull up the other killed chunkserver: client IO is not affected + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill掉两个chunkserver + // 2. Kill two chunkservers ASSERT_EQ(0, cluster->StopChunkServer(1)); ASSERT_EQ(0, cluster->StopChunkServer(2)); - // 3. kill掉两个chunkserver, io无法正常下发 + // 3. After killing two chunkservers, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 30)); - // 4. 拉起刚才被kill的chunkserver的第一个 + // 4. 
Pull up the first chunkserver that was just killed pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; ASSERT_GT(pid, 0); - // 5. 拉起刚才被kill的chunkserver的第一个, - // client的io预期最多会在2*electtime后恢复 - // 如果配置了慢启动,则需要等待 + // 5. Pull up the first chunkserver that was just killed, + // The client's IO is expected to recover at most 2 * electtime + //If slow start is configured, wait // (copysetNum / load_concurrency) * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被kill的chunkserver的第二个 + // 6. Pull up the second chunk server that was just killed pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126", chunkserverConf5); LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid; ASSERT_GT(pid, 0); - // 7. 集群io不影响,正常下发 + // 7. Cluster IO is not affected and is distributed normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: HangTwoChunkserverThenResumeTheChunkserver"; /******* HangTwoChunkserverThenResumeTheChunkserver **********/ - // 1. hang两台chunkserver,然后恢复hang的chunkserver - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang两台chunkserver:client IO持续hang,新写IO和覆盖写都hang - // c. 恢复其中的一台chunkserver:client IO 恢复读写, - // 从恢复chunkserver到client IO恢复时间在election_timeout*2 - // d. 恢复另外一台hang的chunkserver:client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang two chunkservers, and then restore Hang's chunkservers + // 2.. Expectations + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Hang two chunkservers: client IO continues to hang, while new write IO and overwrite write both hang + // c. Restore one of the chunkservers: client IO restores read and write, + // Recovery time from chunkserver to client IO during election_ Timeout * 2 + // d. Restoring another hang's chunkserver: client IO has no impact + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang掉两个个chunkserver + // 2. Hang off two chunkservers ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); - // 3. hang两个chunkserver, io无法正常下发 + // 3. Hang two chunkservers, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被hang的chunkserver的第一个 + // 4. Pull up the first chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 拉起刚才被hang的chunkserver的第一个, - // client的io预期最多会在2*electtime后恢复 - // 如果配置了慢启动,则需要等待 + // 5. Pull up the first chunkserver that was just hung, + //The client's IO is expected to recover at most 2 * electtime + //If slow start is configured, wait // (copysetNum / load_concurrency) * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被hang的chunkserver的第二个 + // 6. Pull up the second chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(2)); - // 7. 集群io不影响,正常下发 + // 7. Cluster IO is not affected and is distributed normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillThreeChunkserverThenRestartTheChunkserver"; /******** KillThreeChunkserverThenRestartTheChunkserver ******/ - // 1. 测试重启三个chunkserver - // 2.预期: - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭三台chunkserver:client IO hang - // c. 重启一台chunkserver:client IO hang - // d. 重启第二台chunkserver:client IO hang, - // 直到chunkserver完全恢复,IO恢复。 - // 恢复时间约等于(chunkserver启动回放数据+2*election_timeout) - // e. 重启第三台chunkserver:client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. 
Test restarting three chunkservers + // 2. Expected: + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Close three chunkservers: client IO hang + // c. Restart a chunkserver: client IO hang + // d. Restart the second chunkserver: client IO hang, + // Until the chunkserver is fully restored and IO is restored. + // The recovery time is approximately equal to (chunkserver starts playback data+2 * election_timeout) + // e. Restarting the third chunkserver: No impact on client IO + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill掉三个chunkserver + // 2. Kill three chunkservers ASSERT_EQ(0, cluster->StopChunkServer(1)); ASSERT_EQ(0, cluster->StopChunkServer(2)); ASSERT_EQ(0, cluster->StopChunkServer(3)); - // 3. kill掉三个chunkserver, io无法正常下发 + // 3. Kill three chunkservers, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 2)); - // 4. 拉起刚才被kill的chunkserver的第一个 + // 4. Pull up the first chunkserver that was just killed pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid; ASSERT_GT(pid, 0); - // 5. 只有一个chunkserver工作, io无法正常下发 + // 5. Only one chunkserver is working, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被kill的chunkserver的第二个 + // 6. Pull up the second chunkserver that was just killed pid = cluster->StartSingleChunkServer(2, "127.0.0.1:22126", chunkserverConf5); LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid; ASSERT_GT(pid, 0); - // 7. client的io恢复 + // 7. Client's IO recovery ASSERT_TRUE(MonitorResume(0, 4096, 80)); - // 8. 拉起其他被kil的chunkserver + // 8. Pull up other chunkservers that have been killed pid = cluster->StartSingleChunkServer(3, "127.0.0.1:22127", chunkserverConf6); LOG(INFO) << "chunkserver 3 started on 127.0.0.1:22127, pid = " << pid; @@ -528,37 +528,37 @@ TEST_F(CSModuleException, ChunkserverException) { LOG(INFO) << "current case: HangThreeChunkserverThenResumeTheChunkserver"; /******** HangThreeChunkserverThenResumeTheChunkserver **********/ - // 1. hang三台chunkserver,然后恢复hang的chunkserver - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang三台chunkserver:client IO hang - // c. 恢复一台chunkserver:client IO hang - // d. 再恢复一台chunkserver:预期在 - // election_timeout*2左右的时间,client IO恢复 - // e. 恢复最后一台chunkserver:预期client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang three chunkservers, and then restore Hang's chunkservers + // 2. Expectations + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Hang three chunkservers: client IO hang + // c. Restore a chunkserver: client IO hang + // d. Restore another chunkserver: expected to be + // election_ About timeout * 2, client IO recovery + // e. Restore the last chunkserver: Expected no impact on client IO + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang掉三个chunkserver + // 2. Hang down three chunkservers ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); ASSERT_EQ(0, cluster->HangChunkServer(3)); - // 3. hang三个chunkserver, io无法正常下发 + // 3. Hang three chunkservers, IO cannot be distributed normally ASSERT_FALSE(MonitorResume(0, 4096, 30)); - // 4. 拉起刚才被hang的chunkserver的第一个 + // 4. Pull up the first chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(1)); - // 5. 
只有一个chunkserver工作, io无法正常下发 + // 5. Only one chunkserver is working, IO cannot be issued normally ASSERT_FALSE(MonitorResume(0, 4096, 80)); - // 6. 拉起刚才被hang的chunkserver的第二个 + // 6. Pull up the second chunkserver that was just hung ASSERT_EQ(0, cluster->RecoverHangChunkServer(2)); ASSERT_EQ(0, cluster->RecoverHangChunkServer(3)); - // 7. client的io预期最多会在2*electtime s内恢复 - // 如果配置了慢启动,则需要等待 + // 7. The client's IO is expected to recover within a maximum of 2 * electtime seconds + // If slow start is configured, wait // (copysetNum / load_concurrency) * election_timeout ASSERT_TRUE(MonitorResume(0, 4096, 80)); } diff --git a/test/integration/client/common/file_operation.cpp b/test/integration/client/common/file_operation.cpp index 44dfc186a5..57f795f985 100644 --- a/test/integration/client/common/file_operation.cpp +++ b/test/integration/client/common/file_operation.cpp @@ -43,7 +43,7 @@ int FileCommonOperation::Open(const std::string& filename, memset(userinfo.owner, 0, 256); memcpy(userinfo.owner, owner.c_str(), owner.size()); - // 先创建文件 + // Create a file first int ret = Create(filename.c_str(), &userinfo, 100*1024*1024*1024ul); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "file create failed! " << ret @@ -51,7 +51,7 @@ int FileCommonOperation::Open(const std::string& filename, return -1; } - // 再打开文件 + // Reopen File int fd = ::Open(filename.c_str(), &userinfo); if (fd < 0 && ret != -LIBCURVE_ERROR::FILE_OCCUPIED) { LOG(ERROR) << "Open file failed!"; @@ -84,7 +84,7 @@ int FileCommonOperation::Open(const std::string& filename, context.stripeUnit = stripeUnit; context.stripeCount = stripeCount; - // 先创建文件 + // Create a file first int ret = globalclient->Create2(context); if (ret != LIBCURVE_ERROR::OK && ret != -LIBCURVE_ERROR::EXISTS) { LOG(ERROR) << "file create failed! " << ret @@ -92,7 +92,7 @@ int FileCommonOperation::Open(const std::string& filename, return -1; } - // 再打开文件 + // Reopen File int fd = ::Open(filename.c_str(), &userinfo); if (fd < 0 && ret != -LIBCURVE_ERROR::FILE_OCCUPIED) { LOG(ERROR) << "Open file failed!"; diff --git a/test/integration/client/common/file_operation.h b/test/integration/client/common/file_operation.h index 0414146eff..6514a6cd81 100644 --- a/test/integration/client/common/file_operation.h +++ b/test/integration/client/common/file_operation.h @@ -31,7 +31,7 @@ namespace test { class FileCommonOperation { public: /** - * 指定文件名,打开文件,如果没创建则先创建,返回fd + * Specify a file name, open the file, if not created, create it first, return fd */ static int Open(const std::string& filename, const std::string& owner); diff --git a/test/integration/client/mds_exception_test.cpp b/test/integration/client/mds_exception_test.cpp index 4cf9f8ede3..01cf2765d5 100644 --- a/test/integration/client/mds_exception_test.cpp +++ b/test/integration/client/mds_exception_test.cpp @@ -149,14 +149,14 @@ class MDSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22230", "127.0.0.1:22231", std::vector{"--name=module_exception_test_mds"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22230:22231, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. 
Start one mds first, make it a leader, and then start the other two mds nodes pid = cluster->StartSingleMDS(0, "127.0.0.1:22222", 22240, mdsConf, true); LOG(INFO) << "mds 0 started on 127.0.0.1:22222, pid = " << pid; @@ -173,7 +173,7 @@ class MDSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -196,7 +196,7 @@ class MDSModuleException : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22225", chunkserverConf1); LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22225, pid = " << pid; @@ -212,7 +212,7 @@ class MDSModuleException : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the underlying copyset select the primary first std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( @@ -233,15 +233,15 @@ class MDSModuleException : public ::testing::Test { } ASSERT_EQ(ret, 0); - // 6. 初始化client配置 + // 6. Initialize client configuration ret = Init(confPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 创建一个文件 + // 7. Create a file fd = curve::test::FileCommonOperation::Open("/test1", "curve"); ASSERT_NE(fd, -1); - // 8. 先睡眠10s,让chunkserver选出leader + // 8. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); ipmap[0] = "127.0.0.1:22222"; @@ -299,12 +299,12 @@ class MDSModuleException : public ::testing::Test { } /** - * 监测client io能否在预期时间内正常下发 - * @param: off是当前需要下发IO的偏移 - * @param: size是下发io的大小 - * @param: predictTimeS是预期在多少秒内IO可以恢复 - * @param[out]: failCount为当前io下发中错误返回的数量 - * @return: 如果io在预期时间内嫩够正常下发,则返true,否则返回false + * Monitor whether client io can be issued normally within the expected time + * @param: off is the offset that currently requires issuing IO + * @param: size is the size of the distributed io + * @param: predictTimeS is the expected number of seconds in which IO can be restored + * @param[out]: failCount is the number of error returns in the current io distribution + * @return: If the io is issued normally within the expected time, return true; otherwise, return false */ bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS) { inflightContl.SetMaxInflightNum(16); @@ -352,7 +352,7 @@ class MDSModuleException : public ::testing::Test { ret = resumeFlag; } - // 唤醒io线程 + // Wake up IO thread iothread.join(); inflightContl.WaitInflightAllComeBack(); @@ -360,16 +360,16 @@ class MDSModuleException : public ::testing::Test { return ret; } - /**下发一个写请求 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否下发成功 + /** Send a write request + * @param: offset is the offset that currently requires issuing IO + * @param: size is the size of the issued IO + * @return: Whether the IO was successfully issued */ bool SendAioWriteRequest(uint64_t offset, uint64_t size) { writeIOReturnFlag = false; auto writeCallBack = [](CurveAioContext* context) { - // 无论IO是否成功,只要返回,就置为true + // Regardless of whether IO is successful or not, as long as it returns, it is set to true writeIOReturnFlag = true; char* buffer = reinterpret_cast(context->buf); delete[] buffer; @@ -388,11 +388,11 @@ class MDSModuleException : public 
::testing::Test { return AioWrite(fd, context) == 0; } - /** 下发一个写请求并读取进行数据验证 - * @param: fd 卷fd - * @param: 当前需要下发io的偏移 - * @param:下发io的大小 - * @return: 数据是否一致 + /** Send a write request and read for data validation + * @param: fd volume fd + * @param: The offset that currently needs to be issued for IO + * @param: The size of the distributed IO + * @return: Whether the data is consistent */ void VerifyDataConsistency(int fd, uint64_t offset, uint64_t size) { char* writebuf = new char[size]; @@ -405,7 +405,7 @@ class MDSModuleException : public ::testing::Test { writebuf[i] = ('a' + std::rand() % 26); } - // 开始写 + // Start writing auto wcb = [](CurveAioContext* context) { if (context->ret == context->length) { testIOWrite = true; @@ -434,7 +434,7 @@ class MDSModuleException : public ::testing::Test { writeThread.join(); ASSERT_TRUE(testIOWrite); - // 开始读 + // Start reading auto rcb = [](CurveAioContext* context) { if (context->ret == context->length) { testIORead = true; @@ -471,7 +471,7 @@ class MDSModuleException : public ::testing::Test { int fd; - // 是否出现挂卸载失败 + // Whether mounting or unmounting fails. bool createOrOpenFailed; bool createDone; std::mutex createMtx; @@ -484,40 +484,40 @@ class MDSModuleException : public ::testing::Test { }; #define segment_size 1 * 1024 * 1024 * 1024ul -// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd +// Test environment topology: Start one client, three chunkservers, three mds, and one etcd on a single node TEST_F(MDSModuleException, MDSExceptionTest) { LOG(INFO) << "current case: KillOneInserviceMDSThenRestartTheMDS"; /********** KillOneInserviceMDSThenRestartTheMDS *************/ - // 1. 重启一台正在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭一台mds,在mds服务切换到另一台mds之前, - // client 新写IO会hang,挂卸载服务会异常 - // c. mds服务切换后,预期client IO无影响,挂卸载服务正常 - // d. 重新拉起mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restarting a currently serving MDS. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write requests can be issued normally. + // b. When shutting down an MDS, before the MDS service switches to another MDS, + // new write IO from clients will hang, and mount/unmount services will behave abnormally. + // c. After the MDS service switches, it is expected that client IO will be unaffected, and mount/unmount services will be normal. + // d. When bringing the MDS back up, client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill一台正在服务的mds,在启动的时候第一台mds当选leader + // 2. Kill an MDS that is currently in service, and when it is started, the first MDS is selected as the leader int serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); - // 3. 启动后台挂卸载线程,预期挂卸载会出现失败 + // 3. Start the background suspend and unload thread, and expect the suspend and unload to fail CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration (20s renewal) ASSERT_TRUE(MonitorResume(segment_size, 4096, 25)); - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. 
Check the current mount/unmount result ASSERT_TRUE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pull up the killed process pid_t pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], 22240 + serviceMDSID, configmap[serviceMDSID], false); @@ -525,85 +525,85 @@ TEST_F(MDSModuleException, MDSExceptionTest) { << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillOneNotInserviceMDSThenRestartTheMDS"; /*********** KillOneNotInserviceMDSThenRestartTheMDS *******/ - // 1. 重启一台不在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭一台不在服务的mds,预期client IO无影响,挂卸载服务正常 - // 1. 集群最初状态,io正常下发 + // 1. Restart an MDS that is not in service + // 2. Expectations + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Shut down an MDS that is not in service: client IO is expected to be unaffected, and the mount/unmount service works normally + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill一台不在服务的mds,在启动的时候第一台mds当选leader, kill第二台 + // 2. Kill an MDS that is not in service. At startup the first MDS is elected leader, so kill the second one serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int killid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->StopMDS(killid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务不会受影响 + // 3. Start the background mount/unmount thread; the mount/unmount service is not expected to be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and write from the next segment to trigger the getorallocate logic + // The cluster serves normally again after the follower mds renews the lease once the session expires (20s lease) ASSERT_TRUE(MonitorResume(2 * segment_size, 4096, 25)); - // 5. 等待挂卸载监测结束 + // 5. Wait for the mount/unmount monitoring to finish WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. The mount/unmount service works normally ASSERT_FALSE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pull up the killed process pid = cluster->StartSingleMDS(killid, ipmap[killid], 22240 + killid, configmap[killid], false); LOG(INFO) << "mds " << killid << " started on " << ipmap[killid] << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: hangOneInserviceMDSThenResumeTheMDS"; /************ hangOneInserviceMDSThenResumeTheMDS ********/ - // 1. hang一台正在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败, - // 因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试 - // 最后重试失败。 - // c. client session续约时长总比mds与etcd之间续约时长大,所以在 - // session续约失败之前mds预期可以完成切换,所以client的session - // 不会过期,覆盖写不会出现异常。 - // d. 恢复被hang的mds,预期对client io无影响 - // 0. 先睡眠一段时间等待mds集群选出leader + // 1. Hang an MDS that is currently in service. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write requests can be issued normally. + // b. During the MDS hang period and before the lease renewal with etcd times out, new write IO will fail. 
+ // This is because a new write triggers getorallocate, and the RPC sent to the MDS will keep timing out, leading to retries + // that eventually fail. + // c. The client session renewal duration is longer than the lease renewal duration between MDS and etcd. + // So, MDS is expected to complete the switch before session renewal failure occurs. + // Therefore, the client's session will not expire, and overwrite writes will not result in exceptions. + // d. When the hung MDS is restored, it is expected to have no impact on client IO. + // 0. First, sleep for a period of time to allow the MDS cluster to elect a leader. std::this_thread::sleep_for(std::chrono::seconds(10)); - // 1. 集群最初状态,io正常下发 + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang一台正在服务的mds,在启动的时候第一台mds当选leader + // 2. Hang an MDS that is currently in service, and when it is started, the first MDS is selected as the leader serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); ASSERT_EQ(0, cluster->HangMDS(serviceMDSID)); - // 3. 启动后台挂卸载线程,预期挂卸载会出现失败 + // 3. Start the background suspend and unload thread, and expect the suspend and unload to fail CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration (20s renewal) auto ret = MonitorResume(3 * segment_size, 4096, 25); if (!ret) { ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID)); ASSERT_TRUE(false); } - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_EQ(0, cluster->RecoverHangMDS(serviceMDSID)); ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], @@ -614,39 +614,39 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_GT(pid, 0); ASSERT_TRUE(createOrOpenFailed); - // 7. 再拉起被kill的mds,对集群没有影响 + // 7. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: hangOneNotInserviceMDSThenResumeTheMDS"; /********** hangOneNotInserviceMDSThenResumeTheMDS ***********/ - // 1. hang一台不在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang一台不在服务的mds,预期client IO无影响,挂卸载服务正常 - // 1. 集群最初状态,io正常下发 + // 1. Hang an out of service MDS + // 2. Expectations + // a. When the cluster status is normal: client read and write requests can be issued normally + // b. Hang an MDS that is not in service, expecting no impact on client IO, and suspending and uninstalling the service is normal + // 1. The initial state of the cluster, IO is issued normally ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang一台不在服务的mds,在启动的时候第一台mds当选leader, hang第二台 + // 2. Hang an MDS that is not in service. When starting, the first MDS is selected as the leader, and hang the second MDS serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int hangid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->HangMDS(hangid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务不会受影响 + // 3. Start the backend suspend and uninstall thread, and it is expected that the suspend and uninstall service will not be affected CreateOpenFileBackend(); - // 4. 
启动后台iops监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start backend iops monitoring and start writing from the next segment to trigger getorallocate logic + // follower mds cluster normal service after renewing session expiration (20s renewal) ret = MonitorResume(4 * segment_size, 4096, 25); if (!ret) { ASSERT_EQ(0, cluster->RecoverHangMDS(hangid)); ASSERT_TRUE(false); } - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_EQ(0, cluster->RecoverHangMDS(hangid)); ASSERT_EQ(0, cluster->StopMDS(hangid)); pid = cluster->StartSingleMDS(hangid, ipmap[hangid], 22240 + hangid, @@ -657,42 +657,42 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_FALSE(createOrOpenFailed); - // 7. 集群没有影响 + // 7. Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillTwoInserviceMDSThenRestartTheMDS"; /************* KillTwoInserviceMDSThenRestartTheMDS ***********/ - // 1. 重启两台mds,其中一台正在服务的mds - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭两台mds,在mds服务切换到另一台mds之前, - // client 新写IO会出现失败,挂卸载服务会异常 - // c. mds服务切换后,预期client IO恢复,挂卸载服务正常 - // d. 重新拉起mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restart two MDS nodes, one of which is currently serving. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write requests can be issued normally. + // b. When shutting down two MDS nodes, before the MDS service switches to another MDS, + // new write IO from clients will fail, and mount/unmount services will behave abnormally. + // c. After the MDS service switches, it is expected that client IO will recover, and mount/unmount services will be normal. + // d. When bringing the MDS nodes back up, client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. kill两台mds,在启动的时候第一台mds当选leader, kill前二台 + // 2. Kill two MDSs. When starting, the first MDS is selected as the leader, and kill the first two MDSs serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int secondid = (serviceMDSID + 1) % 3; ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); ASSERT_EQ(0, cluster->StopMDS(secondid)); - // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 3. Starting the backend suspend and uninstall thread, it is expected that the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration (20s renewal) ASSERT_TRUE(MonitorResume(5 * segment_size, 4096, 25)); - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation monitoring WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid = cluster->StartSingleMDS(serviceMDSID, ipmap[serviceMDSID], 22240 + serviceMDSID, configmap[serviceMDSID], false); @@ -700,10 +700,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) { << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 再拉起被kill的mds,对集群没有影响 + // 8. Pulling up the killed mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 9. 
拉起被kill的其他mds + // 9. Pull up other mds killed pid = cluster->StartSingleMDS(secondid, ipmap[secondid], 22240 + secondid, configmap[secondid], false); LOG(INFO) << "mds " << secondid << " started on " << ipmap[secondid] @@ -712,18 +712,18 @@ TEST_F(MDSModuleException, MDSExceptionTest) { LOG(INFO) << "current case: KillTwoNotInserviceMDSThenRestartTheMDS"; /******** KillTwoNotInserviceMDSThenRestartTheMDS ***********/ - // 1. 重启两台mds,其中两台都不在服务 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. 关闭两台mds,预期client IO无影响,挂卸载服务正常 - // c. 重启这两台mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Restart two MDS nodes, with both nodes not currently serving. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write requests can be issued normally. + // b. When shutting down two MDS nodes, it is expected that client IO will be unaffected, and mount/unmount services will be normal. + // c. When restarting these two MDS nodes, it is expected that client IO will be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 3. kill两台mds,在启动的时候第一台mds当选leader, kill后二台 + // 3. Kill two MDSs. When starting, the first MDS is selected as the leader, and kill the second two MDSs serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); int tempid_1 = (serviceMDSID + 1) % 3; @@ -731,27 +731,27 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->StopMDS(tempid_1)); ASSERT_EQ(0, cluster->StopMDS(tempid_2)); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // 不在服务的mds被kill对集群没有影响 + // 4. Start background IO monitoring and start writing from the next segment to trigger the getorallocate logic + //Killing mds that are not in service has no impact on the cluster ASSERT_TRUE(MonitorResume(6 * segment_size, 4096, 10)); - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_FALSE(createOrOpenFailed); - // 7. 拉起被kill的进程 + // 7. Pulling up the process of being killed pid = cluster->StartSingleMDS(tempid_1, ipmap[tempid_1], 22240 + tempid_1, configmap[tempid_1], false); LOG(INFO) << "mds " << tempid_1 << " started on " << ipmap[tempid_1] << ", pid = " << pid; ASSERT_GT(pid, 0); - // 8. 集群没有影响 + // 8. Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 9. 拉起其他mds,使集群恢复正常 + // 9. Pull up other mds to restore the cluster to normal pid = cluster->StartSingleMDS(tempid_2, ipmap[tempid_2], 22240 + tempid_2, configmap[tempid_2], false); LOG(INFO) << "mds " << tempid_2 << " started on " << ipmap[tempid_2] @@ -760,17 +760,17 @@ TEST_F(MDSModuleException, MDSExceptionTest) { LOG(INFO) << "current case: hangTwoInserviceMDSThenResumeTheMDS"; /******** hangTwoInserviceMDSThenResumeTheMDS ************/ - // 1. hang两台mds,其中包含一台正在服务的mds,然后恢复 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. mds hang期间且在与etcd续约超时之前,这时候新写IO会失败, - // 因为新写触发getorallocate,这个RPC发到mds会出现一直超时,然后重试 - // 最后重试失败。 - // c. client session续约时长总比mds与etcd之间续约时长大,所以在 - // session续约失败之前mds预期可以完成切换,所以client的session - // 不会过期,覆盖写不会出现异常。 - // d. 恢复被hang的mds,预期对client io无影响 - // 1. hang两台mds,在启动的时候第一台mds当选leader, hang前二台 + // 1. Hang two MDS nodes, one of which is currently serving, and then recover them. + // 2. Expectations: + // a. 
When the cluster is in a normal state, client read and write requests can be issued normally. + // b. During the MDS hang period and before the lease renewal with etcd times out, new write IO will fail. + // This is because a new write triggers getorallocate, and the RPC sent to the MDS will keep timing out, leading to retries + // that eventually fail. + // c. The client session renewal duration is longer than the lease renewal duration between MDS and etcd. + // So, MDS is expected to complete the switch before session renewal failure occurs. + // Therefore, the client's session will not expire, and overwrite writes will not result in exceptions. + // d. When the hung MDS nodes are recovered, it is expected to have no impact on client IO. + // 1. Hang two MDS nodes, with the first MDS being elected as leader during startup, and both being hung before the process. serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); tempid_1 = serviceMDSID; @@ -778,12 +778,12 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->HangMDS(tempid_1)); ASSERT_EQ(0, cluster->HangMDS(tempid_2)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that the suspend and uninstall service will be affected CreateOpenFileBackend(); LOG(INFO) << "monitor resume start!"; - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // follower mds在session过期后重新续约后集群正常服务(20s续约) + // 4. Start background IO monitoring and start writing from the next segment to trigger the getorallocate logic + // follower mds cluster normal service after renewing session expiration (20s renewal) ret = MonitorResume(7 * segment_size, 4096, 25); if (!ret) { ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); @@ -792,11 +792,11 @@ TEST_F(MDSModuleException, MDSExceptionTest) { } LOG(INFO) << "monitor resume done!"; - // 5. 等待后台挂卸载监测结束 + // 5. Waiting for the end of backend suspension and uninstallation monitoring WaitBackendCreateDone(); LOG(INFO) << "wait backend create thread done!"; - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2)); ASSERT_EQ(0, cluster->StopMDS(tempid_1)); @@ -814,20 +814,20 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_GT(pid, 0); ASSERT_TRUE(createOrOpenFailed); - // 7. 再拉起被hang的mds,对集群没有影响 + // 7. Pulling up the hung mds again has no impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: hangTwoNotInserviceMDSThenResumeTheMDS"; /********** hangTwoNotInserviceMDSThenResumeTheMDS ********/ - // 1. hang两台mds,其中不包含正在服务的mds,然后恢复 - // 2.预期 - // a. 集群状态正常时:client读写请求可以正常下发 - // b. hang一台不在服务的mds,预期client IO无影响,挂卸载服务正常 - // c. 恢复这两台mds,client IO无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang two MDS nodes, neither of which is currently serving, and then recover them. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write requests can be issued normally. + // b. Hang one MDS node that is not currently serving. It is expected that client IO will be unaffected, and mount/unmount services will behave normally. + // c. When these two MDS nodes are recovered, client IO is expected to be unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang两台mds,在启动的时候第一台mds当选leader, kill后二台 + // 2. 
Hang two mds, the first mds is selected as the leader when starting, and kill the second two mds serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); tempid_1 = (serviceMDSID + 1) % 3; @@ -835,11 +835,11 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_EQ(0, cluster->HangMDS(tempid_1)); ASSERT_EQ(0, cluster->HangMDS(tempid_2)); - // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 3. Starting the backend suspend and uninstall thread, it is expected that the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 4. 启动后台io监测, 从下一个segment开始写,使其触发getorallocate逻辑 - // 不在服务的mds被kill对集群没有影响 + // 4. Start background IO monitoring and start writing from the next segment to trigger the getorallocate logic + //Killing mds that are not in service has no impact on the cluster ret = MonitorResume(8 * segment_size, 4096, 10); if (!ret) { ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); @@ -847,10 +847,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_TRUE(false); } - // 5. 等待挂卸载监测结束 + // 5. Waiting for the end of suspend/unload monitoring WaitBackendCreateDone(); - // 6. 挂卸载服务正常 + // 6. Hanging and uninstalling service is normal ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_1)); ASSERT_EQ(0, cluster->RecoverHangMDS(tempid_2)); ASSERT_EQ(0, cluster->StopMDS(tempid_1)); @@ -868,41 +868,41 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_GT(pid, 0); ASSERT_FALSE(createOrOpenFailed); - // 7. 集群没有影响 + // 7. Cluster has no impact ASSERT_TRUE(MonitorResume(0, 4096, 1)); LOG(INFO) << "current case: KillThreeMDSThenRestartTheMDS"; /********* KillThreeMDSThenRestartTheMDS *********/ - // 1. 重启三台mds - // 2.预期 - // a. 集群状态正常:client读写请求可以正常下发 - // b. kill三台mds:client 在session过期之后出现IO 失败 - // c. client session过期之前这段时间的新写会失败,覆盖写不影响 - // d. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常 - // e. 恢复另外两台hang的mds,client io无影响 - - // 1. kill三台mds + // 1. Restart three MDS nodes. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write requests can be issued normally. + // b. Kill all three MDS nodes: Client IO failures occur after session expiration. + // c. During the period before the client session expires, new writes will fail, but overwrite writes will not be affected. + // d. Recover one of the hung MDS nodes: Client session renewal succeeds, and IO returns to normal. + // e. Recover the other two hung MDS nodes: Client IO remains unaffected. + + // 1. Kill three MDSs ASSERT_EQ(0, cluster->StopAllMDS()); - // 确保mds确实退出了 + //Ensure that the mds has indeed exited std::this_thread::sleep_for(std::chrono::seconds(10)); - // 2. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 2. Starting the backend suspend and uninstall thread, it is expected that the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 3. 下发一个io,sleep一段时间后判断是否返回 - // 由于从下一个segment开始写,使其触发getorallocate逻辑 - // MDS全部不在服务,写请求一直hang,无法返回 + // 3. Send an IO and sleep for a period of time to determine whether to return + // Due to writing from the next segment, it triggers the getorallocate logic + // MDS is no longer in service, write requests are constantly hanging, unable to return ASSERT_TRUE(SendAioWriteRequest(9 * segment_size, 4096)); std::this_thread::sleep_for(std::chrono::seconds(30)); ASSERT_FALSE(writeIOReturnFlag); - // 4. 等待后台挂卸载监测结束 + // 4. Waiting for the end of backend suspension and uninstallation monitoring WaitBackendCreateDone(); - // 5. 判断当前挂卸载情况 + // 5. Determine the current suspension and uninstallation situation ASSERT_TRUE(createOrOpenFailed); - // 6. 拉起被kill的进程 + // 6. 
Pulling up the process of being killed pid = -1; while (pid < 0) { pid = @@ -911,49 +911,49 @@ TEST_F(MDSModuleException, MDSExceptionTest) { std::this_thread::sleep_for(std::chrono::seconds(3)); } - // 7. 检测上次IO是否返回 + // 7. Check if the last IO returned std::this_thread::sleep_for(std::chrono::seconds(20)); ASSERT_TRUE(writeIOReturnFlag); - // 8. 新的mds开始提供服务 + // 8. New mds starts offering services ASSERT_TRUE(MonitorResume(segment_size, 4096, 10)); - // 9. 再拉起被kill的进程 + // 9. Pull up the process of being killed again pid = cluster->StartSingleMDS(1, "127.0.0.1:22223", 22229, mdsConf, false); LOG(INFO) << "mds 1 started on 127.0.0.1:22223, pid = " << pid; ASSERT_GT(pid, 0); - // 10. 对集群没有影响 + // 10. No impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 11. 拉起其他被kill的mds + // 11. Pull up other killed mds pid = cluster->StartSingleMDS(2, "127.0.0.1:22224", 22232, mdsConf, false); LOG(INFO) << "mds 2 started on 127.0.0.1:22224, pid = " << pid; ASSERT_GT(pid, 0); LOG(INFO) << "current case: hangThreeMDSThenResumeTheMDS"; /********** hangThreeMDSThenResumeTheMDS **************/ - // 1. hang三台mds,然后恢复 - // 2.预期 - // a. 集群状态正常:client读写请求可以正常下发 - // b. hang三台mds:client 在session过期之后出现IO hang - // c. client session过期之前这段时间的新写会一直hang,覆盖写不影响 - // e. 恢复其中hang的一台mds:client session重新续约成功,io恢复正常 - // f. 恢复另外两台hang的mds,client io无影响 - // 1. 集群最初状态,io正常下发 + // 1. Hang three MDS nodes and then recover them. + // 2. Expectations: + // a. When the cluster is in a normal state, client read and write requests can be issued normally. + // b. Hang three MDS nodes: Client IO hangs after the session expires. + // c. During the period before the client session expires, new writes will hang continuously, but overwrite writes will not be affected. + // e. Recover one of the hung MDS nodes: Client session renewal succeeds, and IO returns to normal. + // f. Recover the other two hung MDS nodes: Client IO remains unaffected. + // 1. In the initial state of the cluster, IO can be issued normally. ASSERT_TRUE(MonitorResume(0, 4096, 1)); - // 2. hang三台mds + // 2. Hang Three MDSs ASSERT_EQ(0, cluster->HangMDS(0)); ASSERT_EQ(0, cluster->HangMDS(1)); ASSERT_EQ(0, cluster->HangMDS(2)); - // 3. 启动后台挂卸载线程,预期挂卸载服务会受影响 + // 3. Starting the backend suspend and uninstall thread, it is expected that the suspend and uninstall service will be affected CreateOpenFileBackend(); - // 4. 下发一个io,sleep一段时间后判断是否返回 - // 由于从下一个segment开始写,使其触发getorallocate逻辑 - // MDS全部不在服务,写请求一直hang,无法返回 + // 4. Send an IO and sleep for a period of time to determine whether to return + // Due to writing from the next segment, it triggers the getorallocate logic + // MDS is no longer in service, write requests are constantly hanging, unable to return ASSERT_TRUE(SendAioWriteRequest(10 * segment_size, 4096)); std::this_thread::sleep_for(std::chrono::seconds(3)); ret = writeIOReturnFlag; @@ -964,10 +964,10 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_TRUE(false); } - // 5. 等待监测结束 + // 5. Waiting for monitoring to end WaitBackendCreateDone(); - // 6. 判断当前挂卸载情况 + // 6. Determine the current suspension and uninstallation situation if (!createOrOpenFailed) { ASSERT_EQ(0, cluster->RecoverHangMDS(2)); ASSERT_EQ(0, cluster->RecoverHangMDS(1)); @@ -975,9 +975,9 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_TRUE(false); } - // 7. 拉起被hang的进程, 有可能hang的进程因为长时间未与etcd握手, - // 导致其被拉起后就退出了,所以这里在recover之后再启动该mds, - // 这样保证集群中至少有一个mds在提供服务 + // 7. 
Pulling up the process being hung may result in the process not shaking hands with ETCD for a long time, + // After it was pulled up, it exited, so the mds was restarted after recover, + // This ensures that at least one mds in the cluster is providing services ASSERT_EQ(0, cluster->RecoverHangMDS(1)); ASSERT_EQ(0, cluster->StopMDS(1)); @@ -989,11 +989,11 @@ TEST_F(MDSModuleException, MDSExceptionTest) { std::this_thread::sleep_for(std::chrono::seconds(3)); } - // 检测上次IO是否返回 + // Check if the last IO returned std::this_thread::sleep_for(std::chrono::seconds(20)); ASSERT_TRUE(writeIOReturnFlag); - // 8. 新的mds开始提供服务 + // 8. New mds starts offering services ret = MonitorResume(segment_size, 4096, 1); if (!ret) { ASSERT_EQ(0, cluster->RecoverHangMDS(2)); @@ -1001,42 +1001,42 @@ TEST_F(MDSModuleException, MDSExceptionTest) { ASSERT_TRUE(false); } - // 9. 再拉起被hang的进程 + // 9. Pull up the process of being hung again ASSERT_EQ(0, cluster->RecoverHangMDS(2)); ASSERT_EQ(0, cluster->RecoverHangMDS(0)); - // 10. 对集群没有影响 + // 10. No impact on the cluster ASSERT_TRUE(MonitorResume(0, 4096, 1)); } TEST_F(MDSModuleException, StripeMDSExceptionTest) { LOG(INFO) << "current case: StripeMDSExceptionTest"; - // 1. 创建一个条带的卷 + // 1. Create a striped volume int stripefd = curve::test::FileCommonOperation::Open("/test2", "curve", 1024 * 1024, 8); ASSERT_NE(stripefd, -1); uint64_t offset = std::rand() % 5 * segment_size; - // 2. 进行数据的读写校验 + // 2. Perform data read and write verification VerifyDataConsistency(stripefd, offset, 128 *1024 *1024); std::this_thread::sleep_for(std::chrono::seconds(60)); - // 3. kill 一台当前为leader的mds + // 3. Kill an MDS that is currently the leader LOG(INFO) << "stop mds."; int serviceMDSID = 0; cluster->CurrentServiceMDS(&serviceMDSID); ASSERT_EQ(0, cluster->StopMDS(serviceMDSID)); - // 4. 启动后台挂卸载线程 + // 4. Start the background suspend and unload thread CreateOpenFileBackend(); - // 5. 继续随机写数据进行校验 + // 5. Continue to randomly write data for verification offset = std::rand() % 5 * segment_size; LOG(INFO) << "when stop mds, write and read data."; VerifyDataConsistency(stripefd, offset, 128 *1024 *1024); - // 6. 等待挂卸载检测结果 + // 6. Waiting for the results of pending uninstallation detection WaitBackendCreateDone(); - // 7. 挂卸载服务正常 + // 7. Hanging and uninstalling service is normal ASSERT_TRUE(createOrOpenFailed); LOG(INFO) <<"start mds."; diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp index cf1753ff2c..79f91b0350 100644 --- a/test/integration/client/unstable_chunkserver_exception_test.cpp +++ b/test/integration/client/unstable_chunkserver_exception_test.cpp @@ -165,7 +165,7 @@ struct ChunkserverParam { class UnstableCSModuleException : public ::testing::Test { protected: static void SetUpTestCase() { - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf ttt"); system("mkdir -p ttt"); @@ -175,7 +175,7 @@ class UnstableCSModuleException : public ::testing::Test { cluster.reset(new CurveCluster()); ASSERT_NE(nullptr, cluster.get()); - // 生成配置文件 + // Generate Configuration File cluster->PrepareConfig(kMdsConfPath, mdsConfigOpts); cluster->PrepareConfig(kCSConfPath, @@ -183,7 +183,7 @@ class UnstableCSModuleException : public ::testing::Test { cluster->PrepareConfig(kClientConfPath, clientConfigOpts); - // 1. 启动etcd + // 1. 
Start etcd pid_t pid = cluster->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ @@ -192,13 +192,13 @@ class UnstableCSModuleException : public ::testing::Test { << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 2. 启动一个mds + // 2. Start an mds pid = cluster->StartSingleMDS(1, kMdsIpPort, 30013, mdsConf, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(2)); - // 3. 创建物理池 + // 3. Creating a physical pool ASSERT_EQ( 0, cluster->PreparePhysicalPool( @@ -206,27 +206,27 @@ class UnstableCSModuleException : public ::testing::Test { "./test/integration/client/config/unstable/" "topo_unstable.json")); - // 4. 创建chunkserver + // 4. Create chunkserver StartAllChunkserver(); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池,并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the underlying copyset select the primary first ASSERT_EQ(0, cluster->PrepareLogicalPool( 1, "test/integration/client/config/unstable/topo_unstable.json")); std::this_thread::sleep_for(std::chrono::seconds(10)); - // 6. 初始化client配置 + // 6. Initialize client configuration int ret = Init(kClientConfPath); ASSERT_EQ(ret, 0); - // 7. 先睡眠10s,让chunkserver选出leader + // 7. Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } static void TearDownTestCase() { UnInit(); ASSERT_EQ(0, cluster->StopCluster()); - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf module_exception_curve_unstable_cs"); system("rm -rf ttt"); @@ -323,15 +323,15 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { ::Close(fd); } -// 集群拓扑结构 -// 1个client -// 1个etcd -// 1个mds -// 3个zone,每个里面2个chunkserver +// Cluster topology +// 1 client +// 1 ETCD +// 1 mds +// 3 zones, each with 2 chunkservers inside TEST_F(UnstableCSModuleException, HangOneZone) { srand(time(nullptr)); - // 开启多个线程写文件 + // Enable multiple threads to write files LOG(INFO) << "starting write..."; running = true; std::vector openAndWriteThreads; @@ -341,7 +341,7 @@ TEST_F(UnstableCSModuleException, HangOneZone) { "/test" + std::to_string(i)); } - // 正常写入60s, 并记录后30秒的iops + // Write normally for 60 seconds and record the IOPS for the next 30 seconds std::vector beforeRecords; std::this_thread::sleep_for(std::chrono::seconds(30)); for (int i = 1; i <= 30; ++i) { @@ -353,18 +353,18 @@ TEST_F(UnstableCSModuleException, HangOneZone) { beforeRecords.size(); LOG(INFO) << "iops before hang: " << beforeAvgIOps; - // hang一个zone的chunkserver + // Hang a chunkserver for a zone LOG(INFO) << "hang one zone"; ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); std::vector afterRecords; - // 打印每一秒的iops情况 + // Print IOPS per second for (int i = 1; i <= 10; ++i) { std::this_thread::sleep_for(std::chrono::seconds(1)); auto tmp = iops.value.get_value(1); LOG(INFO) << "after " << i << "s, iops: " << tmp; - // 记录后5s的iops值 + // Record the iops value for 5 seconds after recording if (i >= 5) { afterRecords.push_back(tmp); } diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp index 5d32ef8539..a0ec3ffcbb 100644 --- a/test/integration/cluster_common/cluster.cpp +++ b/test/integration/cluster_common/cluster.cpp @@ -135,7 +135,7 @@ int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, LOG(ERROR) << "start mds " << ipPort << " 
fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个mds + // Start an mds in a child process // ./bazel-bin/src/mds/main/curvemds std::vector args; args.emplace_back("./bazel-bin/src/mds/main/curvemds"); @@ -147,8 +147,8 @@ int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as it may cause deadlock!!! */ execv("./bazel-bin/src/mds/main/curvemds", argv.data()); ClearArgv(argv); @@ -229,7 +229,7 @@ int CurveCluster::StartSnapshotCloneServer( LOG(ERROR) << "start snapshotcloneserver " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个snapshotcloneserver + //Starting a snapshotcloneserver in a child process std::vector args; args.emplace_back( "./bazel-bin/src/snapshotcloneserver/snapshotcloneserver"); @@ -239,8 +239,8 @@ int CurveCluster::StartSnapshotCloneServer( } std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as it may cause deadlock!!! */ execv("./bazel-bin/src/snapshotcloneserver/snapshotcloneserver", argv.data()); @@ -321,7 +321,7 @@ int CurveCluster::StopAllSnapshotCloneServer() { ret = -1; } - // 等待进程完全退出 + // Wait for the process to completely exit ::sleep(2); LOG(INFO) << "stop all snapshotcloneservver end."; return ret; @@ -337,7 +337,7 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, LOG(ERROR) << "start etcd " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个etcd + // Start an ETCD in a child process // ip netns exec integ_etcd1 etcd std::vector args{"etcd"}; args.emplace_back("--listen-peer-urls=http://" + peerIpPort); @@ -354,8 +354,8 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as it may cause deadlock!!! */ execvp("etcd", argv.data()); ClearArgv(argv); @@ -500,7 +500,7 @@ int CurveCluster::StartSingleChunkServer( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + split[0]); @@ -511,8 +511,8 @@ int CurveCluster::StartSingleChunkServer( std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as it may cause deadlock!!! */ execv("./bazel-bin/src/chunkserver/chunkserver", argv.data()); ClearArgv(argv); @@ -547,7 +547,7 @@ int CurveCluster::StartSingleChunkServerInBackground( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("netns"); args.emplace_back("exec"); @@ -560,8 +560,8 @@ int CurveCluster::StartSingleChunkServerInBackground( } std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! 
+ * After forking, try not to use LOG() printing for child processes, as it may cause deadlock!!! */ execvp("ip", argv.data()); ClearArgv(argv); diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index e5b04d30c7..d30cb60622 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -60,53 +60,53 @@ namespace curve { class CurveCluster { public: /** - * CurveCluster 构造函数 + * CurveCluster constructor * - * @param[in] netWorkSegment 网桥的网络地址,默认为"192.168.200." - * @param[in] nsPrefix 网络命名空间的前缀,默认为"integ_" + * @param[in] netWorkSegment The network address of the bridge, which defaults to "192.168.200." + * @param[in] nsPrefix The prefix of the network namespace, which defaults to "integ_" */ CurveCluster(const std::string &netWorkSegment = "192.168.200.", const std::string &nsPrefix = "integ_") : networkSegment_(netWorkSegment), nsPrefix_(nsPrefix) {} /** - * InitMdsClient 初始化mdsclient, 用于和mds交互 + * InitMdsClient initializes mdsclient for interaction with mds * - * @param op 参数设置 - * @return 0.成功; 非0.失败 + * @param op parameter settings + * @return 0. Success; Non 0. Failure */ int InitMdsClient(const curve::client::MetaServerOption &op); /** - * @brief 初始化metastore + * @brief Initialize metastore * - * @param[in] etcdEndpoints etcd client的ip port + * @param[in] etcdEndpoints etcd client's IP port * - * @return 返回错误码 + * @return returns an error code */ int InitSnapshotCloneMetaStoreEtcd( const std::string &etcdEndpoints); /** - * BuildNetWork 如果需要是用不同的ip来起chunkserver, - * 需要在测试用例的SetUp中先 调用该函数 - * @return 0.成功; 非0.失败 + * BuildNetWork: if chunkservers need to be started with different IPs, + * this function must be called first in the SetUp of the test case + * @return 0. Success; Non 0. Failure */ int BuildNetWork(); /** - * StopCluster 停止该集群中所有的进程 - * @return 0.成功; -1.失败 + * StopCluster stops all processes in the cluster + * @return 0.Success; -1.Failure */ int StopCluster(); /** - * @brief 生成各模块配置文件 + * @brief Generate configuration files for each module * - * @tparam T 任一ConfigGenerator - * @param configPath 配置文件路径 - * @param options 修改的配置项 + * @tparam T Any ConfigGenerator + * @param configPath Configuration file path + * @param options Configuration items to modify */ template void PrepareConfig(const std::string &configPath, @@ -117,79 +117,79 @@ class CurveCluster { } /** - * StartSingleMDS 启动一个mds - * 如果需要不同ip的chunkserver,ipPort请设置为192.168.200.1:XXXX + * StartSingleMDS starts an mds + * If chunkservers with different IPs are needed, set ipPort to 192.168.200.1:XXXX * * @param[in] id mdsId - * @param[in] ipPort 指定mds的ipPort - * @param[in] mdsConf mds启动参数项, 示例: + * @param[in] ipPort specifies the ipPort of the mds + * @param[in] mdsConf mds startup parameters, for example: * const std::vector mdsConf{ {"--graceful_quit_on_sigterm"}, {"--confPath=./test/integration/cluster_common/mds.basic.conf"}, }; - * @param[in] expectLeader 是否预期是leader - * @return 成功则返回pid; 失败则返回-1 + * @param[in] expectLeader whether this mds is expected to become the leader + * @return success returns pid; Failure returns -1 */ int StartSingleMDS(int id, const std::string &ipPort, int dummyPort, const std::vector &mdsConf, bool expectLeader); /** - * StopMDS 停止指定id的mds - * @return 0.成功; -1.失败 + * StopMDS stops the mds with the specified id + * @return 0.Success; -1.Failure */ int StopMDS(int id); /** - * StopAllMDS 停止所有mds - * @return 0.成功; -1.失败 + * StopAllMDS stops all mds + * @return 0.Success; -1.Failure */ int StopAllMDS(); /** - * @brief 启动一个snapshotcloneserver + * @brief Start a snapshotcloneserver * - * @param id snapshotcloneserver 的Id - * @param ipPort ip端口 - * @param snapshotcloneConf 参数项 - * @return 成功则返回pid; 失败则返回-1 + * @param id The ID of the snapshotcloneserver + * @param ipPort IP port + * @param snapshotcloneConf startup parameters + * @return success returns pid; Failure returns -1 */ int StartSnapshotCloneServer(int id, const std::string &ipPort, const std::vector &snapshotcloneConf); /** - * @brief 停止指定Id的snapshotcloneserver + * @brief Stop the snapshotcloneserver with the specified Id * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功返回0,失败返回-1 + * @param id The ID of the snapshotcloneserver + * @param force Use kill -9 when it is true + * @return returns 0 for success, -1 for failure */ int StopSnapshotCloneServer(int id, bool force = false); /** - * @brief 重启指定Id的snapshotcloneserver + * @brief Restart the snapshotcloneserver with the specified Id * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功则返回pid; 失败则返回-1 + * @param id The ID of the snapshotcloneserver + * @param force Use kill -9 when it is true + * @return success returns pid; Failure returns -1 */ int RestartSnapshotCloneServer(int id, bool force = false); /** - * @brief 停止所有的snapshotcloneserver - * @return 成功返回0,失败返回-1 + * @brief Stop all snapshotcloneservers + * @return returns 0 for success, -1 for failure */ int StopAllSnapshotCloneServer(); /** - * StartSingleEtcd 启动一个etcd节点 + * StartSingleEtcd starts an etcd node * * @param clientIpPort * @param peerIpPort - * @param etcdConf etcd启动项参数, 建议按照模块指定name,防止并发运行时冲突 + * @param etcdConf etcd startup parameters; it is recommended to specify a per-module name to avoid conflicts when tests run concurrently *
std::vector{"--name basic_test_start_stop_module1"} - * @return 成功则返回pid; 失败则返回-1 + * @return success returns pid; Failure returns -1 */ int StartSingleEtcd(int id, const std::string &clientIpPort, const std::string &peerIpPort, @@ -197,41 +197,41 @@ class CurveCluster { /** * WaitForEtcdClusterAvalible - * 在一定时间内等待etcd集群leader选举成功,处于可用状态 + * Wait for the ETCD cluster leader election to be successful and available for a certain period of time */ bool WaitForEtcdClusterAvalible(int waitSec = 20); /** - * StopEtcd 停止指定id的etcd节点 - * @return 0.成功; -1.失败 + * StopEtcd stops the etcd node with the specified id + * @return 0.Success; -1.Failure */ int StopEtcd(int id); /** - * StopAllEtcd 停止所有etcd节点 - * @return 0.成功; -1.失败 + * StopAllEtcd stops all etcd nodes + * @return 0.Success; -1.Failure */ int StopAllEtcd(); /** - * @brief 格式化FilePool + * @brief Format FilePool * - * @param filePooldir FilePool目录 - * @param filePoolmetapath FilePool元数据目录 - * @param filesystemPath 文件系统目录 + * @param filePooldir FilePool directory + * @param filePoolmetapath FilePool metadata directory + * @param filesystemPath file system directory * @param size FilePool size (GB) - * @return 成功返回0,失败返回-1 + * @return returns 0 for success, -1 for failure */ int FormatFilePool(const std::string &filePooldir, const std::string &filePoolmetapath, const std::string &filesystemPath, uint32_t size); /** - * StartSingleChunkServer 启动一个chunkserver节点 + * StartSingleChunkServer starts a chunkserver node * * @param[in] id * @param[in] ipPort - * @param[in] chunkserverConf chunkserver启动项,示例: + * @param[in] chunkserverConf chunkserver startup item, example: * const std::vector chunkserverConf1{ {"--graceful_quit_on_sigterm"}, {"-chunkServerStoreUri=local://./basic1/"}, @@ -243,127 +243,127 @@ class CurveCluster { {"-conf=./test/integration/cluster_common/chunkserver.basic.conf"}, {"-raft_sync_segments=true"}, }; - 建议文件名也按模块的缩写来,文件名不能太长,否则注册到数据库会失败 - * @return 成功则返回pid; 失败则返回-1 + It is recommended to also use the abbreviation of the module for the file name. 
The file name should not be too long, otherwise registering to the database will fail + * @return success returns pid; Failure returns -1 */ int StartSingleChunkServer(int id, const std::string &ipPort, const std::vector &chunkserverConf); /** - * StartSingleChunkServer 在网络命名空间内启动一个指定id的chunkserver - * 无需指定ipPort + * StartSingleChunkServer Starts a chunkserver with the specified id in the network namespace + * No need to specify ipPort * * @param id - * @param chunkserverConf, 同StartSingleChunkServer的示例 - * @return 成功则返回pid; 失败则返回-1 + * @param chunkserverConf, same as the example of StartSingleChunkServer + * @return success returns pid; Failure returns -1 */ int StartSingleChunkServerInBackground( int id, const std::vector &chunkserverConf); /** - * StopChunkServer 停掉指定id的chunkserver进程 - * @return 0.成功; -1.失败 + * StopChunkServer stops the chunkserver process with the specified id + * @return 0.Success; -1.Failure */ int StopChunkServer(int id); /** - * StopAllChunkServer 停止所有chunkserver - * @return 0.成功; -1.失败 + * StopAllChunkServer Stop all chunkserver + * @return 0.Success; -1.Failure */ int StopAllChunkServer(); /** - * PreparePhysicalPool 创建物理池 + * PreparePhysicalPool Create Physical Pool * - * @param[in] id 给指定id的mds发送命令 - * @param[in] clusterMap 拓扑信息,示例: - * ./test/integration/cluster_common/cluster_common_topo_1.txt (不同ip) + * @param[in] id Send command to the specified mds with id + * @param[in] clusterMap topology information, example: + * ./test/integration/cluster_common/cluster_common_topo_1.txt (different IPs) * ./test/integration/cluster_common/cluster_common_topo_2.txt - * (相同ip, 一定要加上port加以区分, - * chunkserver也必须和clusterMap中server的ipPort相同) - * @return 0.成功; -1.失败 + * (The same IP address must be distinguished by adding a port, + * The chunkserver must also be the same as the ipPort of the server in the clusterMap) + * @return 0.Success; -1.Failure */ int PreparePhysicalPool(int mdsId, const std::string &clusterMap); /** - * @return 0.成功; -1.失败 + * @return 0.Success; -1.Failure */ int PrepareLogicalPool(int mdsId, const std::string &clusterMap); /** - * MDSIpPort 获取指定id的mds地址 + * MDSIpPort retrieves the mds address of the specified id */ std::string MDSIpPort(int id); /** - * EtcdClientIpPort 获取指定id的etcd client地址 + * EtcdClientIpPort retrieves the etcd client address for the specified id */ std::string EtcdClientIpPort(int id); /** - * EtcdPeersIpPort 获取指定id的etcd peers地址 + * EtcdPeersIpPort retrieves the etcd Peers address of the specified id */ std::string EtcdPeersIpPort(int id); /** - * ChunkServerIpPort 获取指定id的chunkserver地址 + * ChunkServerIpPort retrieves the chunkserver address for the specified id */ std::string ChunkServerIpPort(int id); /** - * HangMDS hang住指定mds进程 - * @return 0.成功; -1.失败 + * HangMDS hang resides in the specified mds process + * @return 0.Success; -1.Failure */ int HangMDS(int id); /** - * RecoverHangMDS 恢复hang住的mds进程 - * @return 0.成功; -1.失败 + * RecoverHangMDS restores the mds process where hang resides + * @return 0.Success; -1.Failure */ int RecoverHangMDS(int id); /** - * HangEtcd hang住指定etcd进程 - * @return 0.成功; -1.失败 + * HangEtcd hang lives in the specified etcd process + * @return 0.Success; -1.Failure */ int HangEtcd(int id); /** - * RecoverHangEtcd 恢复hang住的mds进程 - * @return 0.成功; -1.失败 + * RecoverHangEtcd recovers the mds process where hang resides + * @return 0.Success; -1.Failure */ int RecoverHangEtcd(int id); /** - * HangChunkServer hang住指定chunkserver进程 - * @return 0.成功; -1.失败 + * HangChunkServer hang resides in the specified chunkserver 
process + * @return 0.Success; -1.Failure */ int HangChunkServer(int id); /** - * RecoverHangChunkServer 恢复hang住的chunkserver进程 - * @return 0.成功; -1.失败 + * RecoverHangChunkServer Restores the chunkserver process where hang resides + * @return 0.Success; -1.Failure */ int RecoverHangChunkServer(int id); /** - * CurrentServiceMDS 获取当前正在提供服务的mds + * CurrentServiceMDS obtains the mds that are currently providing services * - * @param[out] curId 当前正在服务的mds编号 + * @param[out] curId is currently serving the mds number * - * @return true表示有正在服务的mds, false表示没有正在服务的mds + * @return true indicates that there are serving mds, while false indicates that there are no serving mds */ bool CurrentServiceMDS(int *curId); /** - * CreateFile 在curve中创建文件 + * CreateFile creates a file in Curve. * - * @param[in] user 用户 - * @param[in] pwd 密码 - * @param[in] fileName 文件名 - * @param[in] fileSize 文件大小 - * @param[in] normalFile 是否为normal file - * @return 0.成功; -1.失败 + * @param[in] user User + * @param[in] pwd Password + * @param[in] fileName File name + * @param[in] fileSize File size + * @param[in] normalFile Whether it is a normal file + * @return 0. Success; -1. Failure */ int CreateFile(const std::string &user, const std::string &pwd, const std::string &fileName, uint64_t fileSize = 0, @@ -371,81 +371,82 @@ class CurveCluster { private: /** - * ProbePort 探测指定ipPort是否处于监听状态 + * ProbePort checks if the specified ipPort is in a listening state. * - * @param[in] ipPort 指定的ipPort值 - * @param[in] timeoutMs 探测的超时时间,单位是ms - * @param[in] expectOpen 是否希望是监听状态 + * @param[in] ipPort The specified ipPort value. + * @param[in] timeoutMs The timeout for probing in milliseconds. + * @param[in] expectOpen Whether it is expected to be in a listening state. * - * @return 0表示指定时间内的探测符合预期. -1表示指定时间内的探测不符合预期 + * @return 0 indicates that the probing meets the expected condition within the specified time. + * -1 indicates that the probing does not meet the expected condition within the specified time. 
*/ int ProbePort(const std::string &ipPort, int64_t timeoutMs, bool expectOpen); /** * ChunkServerIpPortInBackground - * 在需要不同ip的chunkserver的情况下,用于生成chunkserver ipPort + * Used to generate chunkserver ipPort when chunkservers with different IPs are required */ std::string ChunkServerIpPortInBackground(int id); /** - * HangProcess hang住一个进程 + * HangProcess hang * - * @param pid 进程id - * @return 0.成功; -1.失败 + * @param pid process id + * @return 0.Success; -1.Failure */ int HangProcess(pid_t pid); /** - * RecoverHangProcess 恢复hang住的进程 + * RecoverHangProcess * - * @param pid 进程id - * @return 0.成功; -1.失败 + * @param pid process id + * @return 0.Success; -1.Failure */ int RecoverHangProcess(pid_t pid); private: - // 网络号 + // Network number std::string networkSegment_; - // 网络命名空间前缀 + // Network namespace prefix std::string nsPrefix_; - // mds的id对应的进程号 + // The process number corresponding to the ID of the mds std::map mdsPidMap_; - // mds的id对应的ipport + // The ipport corresponding to the ID of the mds std::map mdsIpPort_; - // snapshotcloneserver id对应的pid + // The pid corresponding to the snapshotcloneserver id std::map snapPidMap_; - // snapshotcloneserver id对应的ipPort + // The ipPort corresponding to the snapshotcloneserver ID std::map snapIpPort_; - // snapshotcloneserver id对应的conf + // Conf corresponding to snapshotcloneserver id std::map> snapConf_; - // etcd的id对应的进程号 + // The process number corresponding to the id of ETCD std::map etcdPidMap_; - // etcd的id对应的client ipport + // The client ipport corresponding to the id of ETCD std::map etcdClientIpPort_; - // etcd的id对应的peer ipport + // Peer ipport corresponding to the id of ETCD std::map etcdPeersIpPort_; - // chunkserver的id对应的进程号 + // The process number corresponding to the id of chunkserver std::map chunkserverPidMap_; - // chunkserver的id对应的ipport + // The IP port corresponding to the ID of the chunkserver std::map chunkserverIpPort_; // mdsClient std::shared_ptr mdsClient_; public: - // SnapshotCloneMetaStore用于测试过程中灌数据 + // SnapshotCloneMetaStore for filling data during testing std::shared_ptr metaStore_; }; } // namespace curve diff --git a/test/integration/cluster_common/cluster_basic_test.cpp b/test/integration/cluster_common/cluster_basic_test.cpp index 8f49b1ebe0..f17429343f 100644 --- a/test/integration/cluster_common/cluster_basic_test.cpp +++ b/test/integration/cluster_common/cluster_basic_test.cpp @@ -95,7 +95,7 @@ class ClusterBasicTest : public ::testing::Test { protected: void SetUp() { curveCluster_ = std::make_shared(); - // TODO(lixiaocui): 需要用sudo去运行,后续打开 + // TODO(lixiaocui): It needs to be run with sudo and opened later // curveCluster_->BuildNetWork(); } @@ -107,28 +107,28 @@ class ClusterBasicTest : public ::testing::Test { std::shared_ptr curveCluster_; }; -// TODO(lixiaocui): 需要sudo运行,ci变更后打开 +// TODO(lixiaocui): Requires sudo to run and open after ci changes TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { - // 起etcd + // Starting etcd pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", std::vector{ "--name=basic_test_start_stop_module1" }); LOG(INFO) << "etcd 1 started on 127.0.0.1:2221:2222, pid = " << pid; ASSERT_GT(pid, 0); - // 起mds + // Starting mds pid = curveCluster_->StartSingleMDS(1, "192.168.200.1:3333", 3334, mdsConf, true); LOG(INFO) << "mds 1 started on 192.168.200.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, 
"./test/integration/cluster_common/cluster_common_topo_1.json")); - // 创建chunkserver + // Create chunkserver pid = curveCluster_->StartSingleChunkServerInBackground(1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started in background, pid = " << pid; @@ -142,17 +142,17 @@ TEST_F(ClusterBasicTest, DISABLED_test_start_stop_module1) { LOG(INFO) << "chunkserver 3 started in background, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset + // Creating logical pools and copysets ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_1.json")); - // 停掉chunkserver + // Stop chunkserver ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); system("rm -r test_start_stop_module1.etcd"); @@ -165,7 +165,7 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { ASSERT_EQ(0, system("rm -fr basic*")); ASSERT_EQ(0, system((std::string("mkdir -p ") + commonDir).c_str())); - // 起etcd + // Starting etcd std::string etcdDir = commonDir + "/etcd.log"; pid_t pid = curveCluster_->StartSingleEtcd( 1, "127.0.0.1:2221", "127.0.0.1:2222", @@ -174,7 +174,7 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起mds + // Starting mds auto mdsConfbak = mdsConf; auto mdsDir = commonDir + "/mds"; ASSERT_EQ(0, system((std::string("mkdir ") + mdsDir).c_str())); @@ -184,19 +184,19 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { true); LOG(INFO) << "mds 1 started on 127.0.0.1:3333, pid = " << pid; ASSERT_GT(pid, 0); - // 初始化mdsclient + // Initialize mdsclient curve::client::MetaServerOption op; op.rpcRetryOpt.rpcTimeoutMs = 4000; op.rpcRetryOpt.addrs = std::vector{ "127.0.0.1:3333" }; ASSERT_EQ(0, curveCluster_->InitMdsClient(op)); - // 创建物理池 + // Creating a physical pool ASSERT_EQ( 0, curveCluster_->PreparePhysicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建chunkserver + // Create chunkserver auto copy1 = chunkserverConf1; std::string chunkserver1Dir = commonDir + "/chunkserver1"; ASSERT_EQ(0, system((std::string("mkdir ") + chunkserver1Dir).c_str())); @@ -224,40 +224,40 @@ TEST_F(ClusterBasicTest, test_start_stop_module2) { LOG(INFO) << "chunkserver 3 started on 127.0.0.1:2004, pid = " << pid; ASSERT_GT(pid, 0); - // 创建逻辑池和copyset + // Creating logical pools and copysets ASSERT_EQ(0, curveCluster_->PrepareLogicalPool( 1, "./test/integration/cluster_common/cluster_common_topo_2.json")); - // 创建文件 + // Create File ASSERT_EQ(0, curveCluster_->CreateFile("test", "test", "/basic_test", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // hang mds进程 + // hang mds process ASSERT_EQ(0, curveCluster_->HangMDS(1)); - // 创建文件失败 + // Failed to create file ASSERT_NE(0, curveCluster_->CreateFile("test1", "test1", "/basic_test1", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 恢复mds进程 + // Resume mds process ASSERT_EQ(0, curveCluster_->RecoverHangMDS(1)); - // 创建文件成功 + // Successfully created file ASSERT_EQ(0, curveCluster_->CreateFile("test2", "test2", "/basic_test2", 10 * 1024 * 1024 * 1024UL, /*normalFile=*/true, "SSD_2")); - // 停掉chunkserver + // Stop chunkserver 
ASSERT_EQ(0, curveCluster_->StopChunkServer(1)); ASSERT_EQ(0, curveCluster_->StopChunkServer(2)); ASSERT_EQ(0, curveCluster_->StopChunkServer(3)); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); system((std::string("rm -fr ") + commonDir).c_str()); @@ -271,7 +271,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_EQ(0, system("rm -fr test_multi_etcd_node*.etcd")); ASSERT_EQ(0, system((std::string("mkdir ") + commonDir).c_str())); - // 起三个etcd + // Start three ETCDs std::string etcdDir = commonDir + "/etcd"; ASSERT_EQ(0, system((std::string("mkdir ") + etcdDir).c_str())); std::vector etcdCluster{ @@ -307,7 +307,7 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { ASSERT_GT(pid, 0); ASSERT_TRUE(curveCluster_->WaitForEtcdClusterAvalible()); - // 起三mds + // Starting three mds std::string mds1Dir = commonDir + "/mds1"; std::string mds2Dir = commonDir + "/mds2"; std::string mds3Dir = commonDir + "/mds3"; @@ -340,16 +340,16 @@ TEST_F(ClusterBasicTest, test_multi_mds_and_etcd) { LOG(INFO) << "mds 3 started on 127.0.0.1:2312, pid = " << pid; ASSERT_GT(pid, 0); - // 获取当前正在服务的mds + // Obtain the currently serving mds int curMds; ASSERT_TRUE(curveCluster_->CurrentServiceMDS(&curMds)); ASSERT_EQ(1, curMds); - // 停掉mds + // Stop mds ASSERT_EQ(0, curveCluster_->StopMDS(1)); ASSERT_EQ(0, curveCluster_->StopMDS(2)); ASSERT_EQ(0, curveCluster_->StopMDS(3)); - // 停掉etcd + // Stop etcd ASSERT_EQ(0, curveCluster_->StopEtcd(1)); ASSERT_EQ(0, curveCluster_->StopEtcd(2)); ASSERT_EQ(0, curveCluster_->StopEtcd(3)); diff --git a/test/integration/cluster_common/mds.basic.conf b/test/integration/cluster_common/mds.basic.conf index 9486982bf5..b0cb16d055 100644 --- a/test/integration/cluster_common/mds.basic.conf +++ b/test/integration/cluster_common/mds.basic.conf @@ -15,196 +15,196 @@ # # -# mds服务端口 +# Mds service port # mds.listen.addr=127.0.0.1:6666 # -# etcd相关配置 +# ETCD related configurations # -# etcd地址 +# ETCD address mds.etcd.endpoint=localhost:2221 -# client建立连接的超时时间 +# The timeout period for establishing a connection with a client mds.etcd.dailtimeoutMs=5000 -# client进行put/get/txn等操作的超时时间 +# The timeout period for client to perform put/get/txn and other operations mds.etcd.operation.timeoutMs=5000 -# client操作失败可以重试的次数 +# The number of times a client operation failed and can be retried mds.etcd.retry.times=3 # -# segment分配量统计相关配置 +# Configuration related to segment allocation statistics # -# 将内存中的数据持久化到etcd的间隔, 单位ms +# The interval between persisting data in memory to ETCD, in milliseconds mds.segment.alloc.periodic.persistInterMs=1000 -# 出错情况下的重试间隔,单位ms +# The retry interval in ms in case of an error mds.segment.alloc.retryInterMs=1000 -# leader竞选时会创建session, 单位是秒, 因为go端代码的接口这个值得单位就是s +# During the leader campaign, a session will be created in seconds, as the value unit for the interface of the go side code is seconds mds.leader.sessionInterSec=5 -# leader竞选的超时时间,如果为0竞选不成功会一直block, 如果大于0,在electionTimeoutMs时间 -# 内未当选leader会返回错误。这里设置10分钟超时,超时后mds会继续竞选 +# The timeout for leader election. If set to 0, the election will block indefinitely if unsuccessful. If set to a value greater than 0, an error will be returned if not elected as leader within the electionTimeoutMs duration. +# Here, a timeout of 10 minutes is set, and if it times out, the MDS will continue the election process. 
mds.leader.electionTimeoutMs=0 # -# scheduler相关配置 +# Scheduler related configurations # -# copysetScheduler开关 +# copysetScheduler switch mds.enable.copyset.scheduler=true -# leaderScheduler开关 +# leaderScheduler switch mds.enable.leader.scheduler=true -# recoverScheduler开关 +# recoverScheduler switch mds.enable.recover.scheduler=true -# replicaScheduler开关 +# replicaScheduler switch mds.enable.replica.scheduler=true -# copysetScheduler 轮次间隔,单位是s +# copysetScheduler round interval, in seconds mds.copyset.scheduler.intervalSec=5 -# replicaScheduler 轮次间隔,单位是s +# replicaScheduler round interval, in seconds mds.replica.scheduler.intervalSec=5 -# leaderScheduler 轮次间隔,单位是s +# leaderScheduler round interval, in seconds mds.leader.scheduler.intervalSec=30 -# recoverScheduler 轮次间隔,单位是s +# recoverScheduler round interval, in seconds mds.recover.scheduler.intervalSec=5 -# 每块磁盘上operator的并发度 +# The concurrency of operators on each disk mds.schduler.operator.concurrent=4 -# leader变更超时时间, 超时后mds从内存移除该operator +# Timeout for a leader transfer; after it expires, mds removes the operator from memory mds.schduler.transfer.limitSec=1800 -# 减一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for removing a replica; after it expires, mds removes the operator from memory mds.scheduler.remove.limitSec=1800 -# 增加一个副本超时时间, 超时后mds从内存移除该operator +# Timeout for adding a replica; after it expires, mds removes the operator from memory mds.scheduler.add.limitSec=1800 -# copyset数量极差不能超过均值的百分比 +# The range (max - min) of copyset counts must not exceed this percentage of the mean mds.scheduler.copysetNumRangePercent=0.05 -# chunkserver上copyset的scatte-rwidth不能超过最小值的百分比 +# The scatter-width of copysets on a chunkserver must not exceed this percentage of the minimum value mds.schduler.scatterWidthRangePerent=0.2 -# 一个server上超过一定数量的chunkserver offline, 不做恢复 +# If more than this number of chunkservers are offline on one server, no recovery is performed mds.chunkserver.failure.tolerance=3 -# chunkserver启动coolingTimeSec_后才可以作为target leader, 单位是s -# TODO(lixiaocui): 续得一定程度上与快照的时间间隔方面做到相关 +# A chunkserver can only become a target leader after it has been up for coolingTimeSec_, in seconds +# TODO(lixiaocui): this should be correlated with the snapshot interval to some extent mds.scheduler.chunkserver.cooling.timeSec=1800 # -# 心跳相关配置,单位为ms +# Heartbeat related configuration, in ms # -# chunkserver和mds的心跳间隔 +# Heartbeat interval between chunkserver and mds mds.heartbeat.intervalMs=1000 -# chunkserver和mds间心跳miss的时间 +# Time after which a heartbeat between chunkserver and mds is considered missed mds.heartbeat.misstimeoutMs=3000 -# mds在心跳miss后offlinetimeout被标记为offline +# After a heartbeat miss, mds marks the chunkserver offline once offlinetimeout expires mds.heartbeat.offlinetimeoutMs=1800000 -# mds启动后延迟一定时间开始指导chunkserver删除物理数据 -# 需要延迟删除的原因在代码中备注 +# After mds starts, it waits for a period of time before instructing chunkservers to delete physical data +# The reason for the delayed deletion is noted in the code mds.heartbeat.clean_follower_afterMs=1200000 # -# namespace cache相关 +# namespace cache related # -# namestorage的缓存大小,为0表示不缓存 -# 按照每个文件最小10GB的空间预算。算上超售(2倍) -# 文件数量 = 5PB/10GB ~= 524288 个文件 -# sizeof(namespace对象) * 524288 ~= 89Byte *524288 ~= 44MB 空间 -# 16MB chunk大小, 1个segment 1GB -# sizeof(segment 对象) * 2621440 ~=(32 + (1024/16)*12)* 2621440 ~= 1.95 GB -# 数据量:3GB左右 -# 记录数量:524288+2621440 ~= 300w左右 +# Cache size of namestorage; 0 means no caching +# Based on a minimum space budget of 10GB per file.
Including oversold (2x) +# Number of files = 5PB/10GB ~= 524288 files +# sizeof(namespace object) * 524288 ~= 89Byte * 524288 ~= 44MB space +# 16MB chunk size, 1 segment 1GB +# sizeof(segment object) * 2621440 ~= (32+(1024/16) * 12) * 2621440 ~= 1.95 GB +# Data volume: about 3GB +# Record quantity: 524288+2621440 ~= about 300w mds.cache.count=100000 # # mysql Database config # -# 数据库使用的database名称 +# The database name used by the database mds.DbName=cluster_common_curve_mds -# 数据库用户名 +# Database username mds.DbUser=root -# 数据库地址 +# Database address mds.DbUrl=localhost -# 数据库登录密码 +# Database login password mds.DbPassword=qwer mds.DbPoolSize=128 # # mds.session settings # -# mds.session过期时间,单位us +# mds.session expiration time, in us mds.session.leaseTimeUs=5000000 -# 能够容忍的client和mds之间的时钟不同步的时间,单位us +# Tolerable time of clock asynchrony between client and mds, in units of us mds.session.toleranceTimeUs=500000 -# mds.session后台扫描线程扫描间隔时间,单位us +# mds.session Background Scan Thread Scan Interval Time, Unit: us mds.session.intevalTimeUs=500000 # # auth settings # -# root用户密码 +# root User Password mds.auth.rootPassword=root_password # # file lock setting # -# mds的文件锁桶大小 +# File lock bucket size for mds mds.filelock.bucketNum=8 # # topology config # -# Toplogy 定期刷新入数据库的时间间隔 +# The time interval for Toplogy to periodically refresh into the database mds.topology.TopologyUpdateToRepoSec=60 -# 请求chunkserver上创建全部copyset的超时时间 +# Request timeout for creating all copysets on chunkserver mds.topology.CreateCopysetRpcTimeoutMs=10000 -# 请求chunkserver上创建copyset重试次数 +# Request to create copyset on chunkserver retry count mds.topology.CreateCopysetRpcRetryTimes=20 -# 请求chunkserver上创建copyset重试间隔 +# Request to create copyset on chunkserver retry interval mds.topology.CreateCopysetRpcRetrySleepTimeMs=1000 -# Topology模块刷新metric时间间隔 +# Topology module refresh metric interval mds.topology.UpdateMetricIntervalSec=1 -# 物理池使用百分比,即使用量超过这个值即不再往这个池分配 +# The percentage of physical pool usage, even if the usage exceeds this value, it will no longer be allocated to this pool mds.topology.PoolUsagePercentLimit=90 -# 多pool选pool策略 0:Random, 1:Weight +# Multi pool selection pool strategy 0:Random, 1:Weight mds.topology.choosePoolPolicy=0 # # copyset config -# 默认值,为0时不启用 +# Default value, not enabled when 0 # -# 生成copyset重试次数 +# Generate copyset retry count mds.copyset.copysetRetryTimes=10 -# 所有chunkserver的scatterWidth需满足的最大方差 +# The maximum variance that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthVariance=0 -# 所有chunkserver的scatterWidth需满足的最大标准差 +# The maximum standard deviation that the scatterWidth of all chunkservers must meet mds.copyset.scatterWidthStandardDevation=0 -# 所有chunkserver的scatterWidth需满足的最大极差 +# The maximum range that the scatterWidth of all chunkservers needs to meet mds.copyset.scatterWidthRange=0 -# 所有chunkserver的scatterWidth偏离均值的百分比 -# scatterwidth偏移百分比设置过大,导致部分机器scatterwidth过小,影响机器恢复时间,恢复 -# 时间会导致集群的可靠性降低;其次,会导致部分机器scatterwith过大,某些chunkserver上的 -# copyset散落在各机器上,其他机器一旦写入,这些scatter-with较大的机器成为热点 -# scatterwidth偏移百分比设置过小,导致scatterwidth平均程度要求更大,copyset算法要求越高, -# 导致算法可能算不出理想结果,建议设置值为20 +# Percentage of Deviation from the Mean ScatterWidth of All Chunk Servers. Setting a high percentage for scatterWidth deviation can lead to some machines having +# excessively small scatterWidth, which impacts machine recovery times and reduces the overall reliability of the cluster. 
Additionally, it can result in certain machines +# having excessively large scatterWidth values, causing copysets on these chunk servers to be scattered across various machines. When other machines write data, these servers +# with larger scatterWidth can become performance bottlenecks. +# Conversely, setting a low percentage for scatterWidth deviation requires a higher degree of scatterWidth uniformity, demanding more from the copyset algorithm. This +# can lead to the algorithm being unable to produce optimal results. It is recommended to set the value at 20 for a balance between these factors. mds.copyset.scatterWidthFloatingPercentage=20 # # curvefs config # -# curvefs的默认chunk size大小,16MB = 16*1024*1024 = 16777216 +# The default chunk size for curvefs is 16MB = 16*1024*1024 = 16777216 mds.curvefs.defaultChunkSize=16777216 # # chunkseverclient config # -# rpc 超时时间 +# RPC timeout mds.chunkserverclient.rpcTimeoutMs=500 -# rpc 重试次数 +# RPC retry count mds.chunkserverclient.rpcRetryTimes=5 -# rpc 重试时间间隔 +# RPC retry interval mds.chunkserverclient.rpcRetryIntervalMs=500 -# 从copyset的每个chunkserver getleader的重试的最大轮次 +# The maximum number of retries from each chunkserver getleader in the copyset mds.chunkserverclient.updateLeaderRetryTimes=5 -# 从copyset的每个chunkserver getleader的每一轮的间隔,需大于raft选主的时间 +# The interval between each round of each chunkserver getleader in the copyset must be greater than the time for selecting the master in the raft mds.chunkserverclient.updateLeaderRetryIntervalMs=5000 # # common options # -# 日志存放文件夹 +# Log storage folder mds.common.logDir=./runlog/ diff --git a/test/integration/common/chunkservice_op.cpp b/test/integration/common/chunkservice_op.cpp index d359d5e294..c4b79a37bd 100644 --- a/test/integration/common/chunkservice_op.cpp +++ b/test/integration/common/chunkservice_op.cpp @@ -105,7 +105,7 @@ int ChunkServiceOp::ReadChunk(struct ChunkServiceOpConf *opConf, CHUNK_OP_STATUS status = response.status(); LOG_IF(ERROR, status) << "read failed: " << CHUNK_OP_STATUS_Name(status); - // 读成功,复制内容到data + // Successfully read, copy content to data if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) { cntl.response_attachment().copy_to(data, cntl.response_attachment().size()); @@ -145,7 +145,7 @@ int ChunkServiceOp::ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, LOG_IF(ERROR, status) << "readchunksnapshot failed: " << CHUNK_OP_STATUS_Name(status); - // 读成功,复制内容到data + // Successfully read, copy content to data if (status == CHUNK_OP_STATUS_SUCCESS && data != nullptr) { cntl.response_attachment().copy_to(data, cntl.response_attachment().size()); @@ -342,7 +342,7 @@ int ChunkServiceVerify::VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, << ", offset=" << offset << ", len=" << len << ", cloneFileSource=" << cloneFileSource << ", cloneFileOffset=" << cloneFileOffset << ", ret=" << ret; - // chunk写成功,同步更新chunkData内容和existChunks_ + // Chunk successfully written, synchronously updating chunkData content and existChunks_ if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) chunkData->replace(offset, len, data); existChunks_.insert(chunkId); @@ -381,12 +381,12 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, return -1; } - // 读成功,则判断内容是否与chunkData吻合 + // If read successfully, determine if the content matches chunkData if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) { - // 查找数据有差异的位置 + // Find locations with data differences uint32_t i = 0; while (i < len && data[i] == (*chunkData)[offset + i]) ++i; - // 读取数据与预期相符,返回0 + // 
Read data that matches expectations, return 0 if (i == len) return 0; @@ -394,7 +394,7 @@ int ChunkServiceVerify::VerifyReadChunk(ChunkID chunkId, SequenceNum sn, << ", from offset " << offset + i << ", read " << static_cast(data[i]) << ", expected " << static_cast((*chunkData)[offset + i]); - // 打印每个page的第一个字节 + // Print the first byte of each page uint32_t j = i / kPageSize * kPageSize; for (; j < len; j += kPageSize) { LOG(ERROR) << "chunk offset " << offset + j << ": read " @@ -431,12 +431,12 @@ int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, return -1; } - // 读成功,则判断内容是否与chunkData吻合 + // If read successfully, determine if the content matches chunkData if (ret == CHUNK_OP_STATUS_SUCCESS && chunkData != nullptr) { - // 查找数据有差异的位置 + // Find locations with data differences int i = 0; while (i < len && data[i] == (*chunkData)[offset + i]) ++i; - // 读取数据与预期相符,返回0 + // Read data that matches expectations, return 0 if (i == len) return 0; @@ -444,7 +444,7 @@ int ChunkServiceVerify::VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, << ", from offset " << offset + i << ", read " << static_cast(data[i]) << ", expected " << static_cast((*chunkData)[offset + i]); - // 打印每个4KB的第一个字节 + // Print the first byte of each 4KB int j = i / kPageSize * kPageSize; for (; j < len; j += kPageSize) { LOG(ERROR) << "chunk offset " << offset + j << ": read " @@ -518,7 +518,7 @@ int ChunkServiceVerify::VerifyGetChunkInfo(ChunkID chunkId, bool chunk_existed = existChunks_.find(chunkId) != std::end(existChunks_); switch (ret) { case CHUNK_OP_STATUS_SUCCESS: - // 如果curSn或snapSn与预期不符,则返回-1 + // If curSn or snapSn does not match expectations, return -1 LOG_IF(ERROR, (curSn != expCurSn || snapSn != expSnapSn)) << "GetChunkInfo for " << chunkId << " failed, curSn=" << curSn << ", expected " << expCurSn << "; snapSn=" << snapSn @@ -526,14 +526,14 @@ int ChunkServiceVerify::VerifyGetChunkInfo(ChunkID chunkId, return (curSn != expCurSn || snapSn != expSnapSn) ? -1 : 0; case CHUNK_OP_STATUS_CHUNK_NOTEXIST: - // 如果chunk预期存在,则返回-1 + // If chunk is expected to exist, return -1 LOG_IF(ERROR, chunk_existed) << "Unexpected GetChunkInfo NOTEXIST, chunk " << chunkId << " must be existed"; return chunk_existed ? 
-1 : 0; case CHUNK_OP_STATUS_REDIRECTED: - // 如果返回的redirectedLeader与给定的不符,则返回-1 + // If the redirectedLeader returned does not match the given, then -1 is returned LOG_IF(ERROR, expLeader != redirectedLeader) << "GetChunkInfo failed, redirected to " << redirectedLeader << ", expected " << expLeader; diff --git a/test/integration/common/chunkservice_op.h b/test/integration/common/chunkservice_op.h index 28f32c6891..c662f72c6e 100644 --- a/test/integration/common/chunkservice_op.h +++ b/test/integration/common/chunkservice_op.h @@ -49,16 +49,16 @@ struct ChunkServiceOpConf { class ChunkServiceOp { public: /** - * @brief 通过chunkService写chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Write a chunk through chunkService + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 待写数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data to be written + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int WriteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, @@ -67,16 +67,16 @@ class ChunkServiceOp { off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk through chunkService + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 读取内容 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data reading content + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int ReadChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, @@ -85,50 +85,50 @@ class ChunkServiceOp { off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk快照 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk snapshot through chunkService + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 读取内容 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data reading content + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, std::string *data); /** - * @brief 通过chunkService删除chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Delete chunk through chunkService + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param sn chunk version + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int DeleteChunk(struct 
ChunkServiceOpConf *opConf, ChunkID chunkId, SequenceNum sn); /** - * @brief 通过chunkService删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief: Delete the snapshot generated during this dump or historical legacy through chunkService + * If no snapshot is generated during the dump process, modify the correctedSn of the chunk + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId * @param correctedSn - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf *opConf, ChunkID chunkId, SequenceNum correctedSn); /** - * @brief 通过chunkService创建clone chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Create a clone chunk through chunkService + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId - * @param location 源chunk在源端的位置,可能在curve或S3上 + * @param location The location of the source chunk on the source side, possibly on curve or S3 * @param correctedSn * @param sn * @param chunkSize - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int CreateCloneChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, const std::string &location, @@ -136,24 +136,24 @@ class ChunkServiceOp { uint64_t chunkSize); /** - * @brief 通过chunkService恢复chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Restore Chunk through ChunkService + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId * @param offset * @param len - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int RecoverChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, off_t offset, size_t len); /** - * @brief 通过chunkService获取chunk元数据 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief: Obtain chunk metadata through chunkService + * @param opConf Common configuration parameters such as, leaderPeer/copyset, etc * @param chunkId - * @param curSn 返回当前chunk版本 - * @param snapSn 返回快照chunk版本 - * @param redirectedLeader 返回重定向主节点 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param curSn returns the current chunk version + * @param snapSn returns the snapshot chunk version + * @param redirectedLeader returns the redirected master node + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ static int GetChunkInfo(struct ChunkServiceOpConf *opConf, ChunkID chunkId, SequenceNum *curSn, SequenceNum *snapSn, @@ -166,16 +166,16 @@ class ChunkServiceVerify { : opConf_(opConf) {} /** - * @brief 执行写chunk, 并将数据写入到chunkdata对应区域,以便于后续验证数据。 + * @brief executes the write chunk and writes the data to the corresponding area of chunkdata for subsequent data validation. 
* @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param data 待写数据 - * @param chunkData 整个chunk的预期数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 返回写操作的错误码 + * @param data data to be written + * @param chunkData Expected data for the entire chunk + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return returns the error code for the write operation */ int VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, const char *data, string *chunkData, @@ -183,15 +183,15 @@ class ChunkServiceVerify { off_t cloneFileOffset = 0); /** - * @brief 执行读chunk, 并验证读取内容是否与chunkdata对应区域的预期数据吻合。 + * @brief Executes a chunk read and verifies whether the read content matches the expected data in the corresponding region of chunkData. * @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param chunkData 整个chunk的预期数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 读请求结果符合预期返回0,否则返回-1 + * @param chunkData Expected data for the entire chunk + * @param cloneFileSource The file path of the clone source + * @param cloneFileOffset Relative offset of clone chunk in clone source + * @return 0 if the read result matches expectations, otherwise -1 */ int VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, string *chunkData, @@ -199,71 +199,71 @@ class ChunkServiceVerify { off_t cloneFileOffset = 0); /** - * @brief 执行读chunk快照, - * 并验证读取内容是否与chunkdata对应区域的预期数据吻合。 + * @brief Executes a chunk snapshot read, + * and verifies whether the read content matches the expected data in the corresponding area of chunkData. 
* @param chunkId - * @param sn chunk版本 + * @param sn chunk version * @param offset * @param len - * @param chunkData 整个chunk的预期数据 - * @return 读请求结果符合预期返回0,否则返回-1 + * @param chunkData Expected data for the entire chunk + * @return The read request result meets the expected return of 0, otherwise it returns -1 */ int VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, string *chunkData); /** - * @brief 删除chunk + * @brief delete chunk * @param chunkId - * @param sn chunk版本 - * @return 返回删除操作的错误码 + * @param sn chunk version + * @return returns the error code for the delete operation */ int VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn); /** - * @brief 删除chunk的快照 + * @brief Delete the snapshot of the chunk * @param chunkId * @param correctedSn - * @return 返回删除操作的错误码 + * @return returns the error code for the delete operation */ int VerifyDeleteChunkSnapshotOrCorrectSn(ChunkID chunkId, SequenceNum correctedSn); /** - * @brief 创建clone chunk + * @brief Create clone chunk * @param chunkId - * @param location 源地址 + * @param location source address * @param correctedSn * @param sn * @param chunkSize - * @return 返回创建操作的错误码 + * @return returns the error code for the creation operation */ int VerifyCreateCloneChunk(ChunkID chunkId, const std::string &location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize); /** - * @brief 恢复chunk + * @brief restore chunk * @param chunkId * @param offset * @param len - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise an error code will be returned */ int VerifyRecoverChunk(ChunkID chunkId, off_t offset, size_t len); /** - * @brief 获取chunk元数据,并检验结果是否符合预期 + * @brief to obtain chunk metadata and verify if the results meet expectations * @param chunkId - * @param expCurSn 预期chunk版本,-1表示不存在 - * @param expSanpSn 预期快照版本,-1表示不存在 - * @param expLeader 预期redirectedLeader - * @return 验证成功返回0,否则返回-1 + * @param expCurSn Expected chunk version, -1 indicates non-existent + * @param expSanpSn Expected snapshot version, -1 indicates non-existent + * @param expLeader Expected redirectedLeader + * @return returns 0 after successful verification, otherwise returns -1 */ int VerifyGetChunkInfo(ChunkID chunkId, SequenceNum expCurSn, SequenceNum expSnapSn, string expLeader); private: struct ChunkServiceOpConf *opConf_; - // 记录写过的chunkId(预期存在),用于判断请求的返回值是否符合预期 + //Record the chunkId (expected existence) that has been written, used to determine whether the return value of the request meets expectations std::set existChunks_; }; diff --git a/test/integration/common/config_generator.h b/test/integration/common/config_generator.h index e838aed61f..84e32f47d1 100644 --- a/test/integration/common/config_generator.h +++ b/test/integration/common/config_generator.h @@ -40,7 +40,7 @@ class CSTConfigGenerator : public ConfigGenerator { CSTConfigGenerator() {} ~CSTConfigGenerator() {} bool Init(const std::string& port) { - // 加载配置文件模板 + // Load Configuration File Template config_.SetConfigPath(DEFAULT_CHUNKSERVER_CONF); if (!config_.LoadConfig()) { return false; diff --git a/test/integration/common/peer_cluster.cpp b/test/integration/common/peer_cluster.cpp index f09db13283..c5f50fe0b0 100644 --- a/test/integration/common/peer_cluster.cpp +++ b/test/integration/common/peer_cluster.cpp @@ -109,7 +109,7 @@ int PeerCluster::StartPeer(const Peer &peer, LOG(ERROR) << "start peer fork failed"; return -1; } else if (0 == pid) { - /* 在子进程起一个 ChunkServer */ + /* Starting a ChunkServer in a child process */ 
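As a reading aid for the ChunkServiceVerify interface declared in test/integration/common/chunkservice_op.h above, here is a minimal usage sketch. It assumes a gtest body with a populated ChunkServiceOpConf, that 0 denotes CHUNK_OP_STATUS_SUCCESS, and made-up ids and sizes; it is illustrative only and not part of this patch.

    ChunkServiceOpConf opConf;                   // leaderPeer / logicPoolId / copysetId / rpc timeout set by the fixture
    ChunkServiceVerify verify(&opConf);

    std::string chunkData(16 * 1024 * 1024, 0);  // expected content of the whole chunk, updated by VerifyWriteChunk
    std::string payload(4096, 'a');              // data to write at offset 0

    // Write, then read back; VerifyReadChunk compares the data read against chunkData.
    ASSERT_EQ(0, verify.VerifyWriteChunk(/*chunkId=*/1, /*sn=*/1, /*offset=*/0,
                                         payload.size(), payload.c_str(), &chunkData));
    ASSERT_EQ(0, verify.VerifyReadChunk(/*chunkId=*/1, /*sn=*/1, /*offset=*/0,
                                        payload.size(), &chunkData));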
StartPeerNode(id, params_[paramsIndexs_[id]]); exit(0); } @@ -120,7 +120,7 @@ int PeerCluster::StartPeer(const Peer &peer, std::unique_ptr>(peerId.to_string(), std::move(peerNode))); - // 在创建copyset之前,先等chunkserver启动 + // Before creating a copyset, wait for chunkserver to start ::usleep(1500 * 1000); int ret = CreateCopyset(logicPoolID_, copysetID_, peer, peers_); @@ -258,7 +258,7 @@ int PeerCluster:: ConfirmLeader(const LogicPoolID &logicPoolId, int PeerCluster::WaitLeader(Peer *leaderPeer) { butil::Status status; /** - * 等待选举结束 + * Waiting for the election to end */ ::usleep(3 * electionTimeoutMs_ * 1000); const int kMaxLoop = (3 * electionTimeoutMs_) / 100; @@ -267,8 +267,8 @@ int PeerCluster::WaitLeader(Peer *leaderPeer) { status = GetLeader(logicPoolID_, copysetID_, conf_, leaderPeer); if (status.ok()) { /** - * 由于选举之后还需要提交应用 noop entry 之后才能提供服务, - * 所以这里需要等待 noop apply,这里等太短,可能容易失败,后期改进 + * Due to the need to submit the application noop entry after the election to provide services, + * So we need to wait for the noop application here. If the wait time is too short, it may be easy to fail, so we need to improve it later */ usleep(electionTimeoutMs_ * 1000); LOG(INFO) << "Wait leader success, leader is: " @@ -446,15 +446,15 @@ std::shared_ptr PeerCluster::fs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO + * @param sn The version number written this time */ void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -521,14 +521,14 @@ void WriteThenReadVerify(Peer leaderPeer, } /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -569,14 +569,14 @@ void ReadVerify(Peer leaderPeer, } /** - * 读chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify by reading the snapshot of the chunk + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -592,7 +592,7 @@ void ReadSnapshotVerify(Peer leaderPeer, ChunkService_Stub stub(&channel); - // 获取chunk的快照版本 + // Obtain the snapshot version of the chunk uint64_t snapSn; { 
brpc::Controller cntl; @@ -639,10 +639,10 @@ void ReadSnapshotVerify(Peer leaderPeer, } /** - * 删除chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Delete snapshot of chunk for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id * @param csn corrected sn */ @@ -677,14 +677,14 @@ void DeleteSnapshotVerify(Peer leaderPeer, } /** - * 异常I/O验证,read数据不符合预期 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Abnormal I/O verification, read data does not meet expectations + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -725,14 +725,14 @@ void ReadNotVerify(Peer leaderPeer, } /** - * 通过read验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through read + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, @@ -770,14 +770,14 @@ void ReadVerifyNotAvailable(Peer leaderPeer, } /** - * 通过write验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through write + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, @@ -816,10 +816,10 @@ void WriteVerifyNotAvailable(Peer leaderPeer, } /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ void CopysetStatusVerify(const std::vector &peers, LogicPoolID logicPoolID, @@ -847,7 +847,7 @@ void CopysetStatusVerify(const std::vector &peers, ASSERT_FALSE(cntl.Failed()); LOG(INFO) << peerId.to_string() << "'s status is: \n" << response.DebugString(); - // 多个副本的state是不一样的,因为有leader,也有follower + // The states of multiple replicas are different because there are leaders and followers response.clear_state(); response.clear_peer(); response.clear_firstindex(); diff --git a/test/integration/common/peer_cluster.h b/test/integration/common/peer_cluster.h index 4a5fcacb58..f310630d16 100644 --- a/test/integration/common/peer_cluster.h +++ b/test/integration/common/peer_cluster.h @@ -53,29 +53,29 @@ namespace chunkserver { using curve::common::Peer; /** - * PeerNode 状态 - * 1. 
exit:未启动,或者被关闭 - 2. running:正在运行 - 3. stop:hang 住了 + * PeerNode status + * 1. exit: Not started or closed + * 2. running: Running + * 3. stop: hung */ enum class PeerNodeState { - EXIT = 0, // 退出 - RUNNING = 1, // 正在运行 - STOP = 2, // hang住 + EXIT = 0, // Exit + RUNNING = 1, // Running + STOP = 2, // Hung }; /** - * 一个 ChunkServer 进程,包含某个 Copyset 的某个副本 + * A ChunkServer process that contains one replica of a Copyset */ struct PeerNode { PeerNode() : pid(0), state(PeerNodeState::EXIT) {} - // Peer对应的进程id + // Process ID corresponding to Peer pid_t pid; // Peer Peer peer; - // copyset的集群配置 + // Cluster configuration for copyset Configuration conf; - // PeerNode的状态 + // Status of PeerNode PeerNodeState state; }; @@ -92,7 +92,7 @@ class FakeTopologyService : public TopologyService { }; /** - * 封装模拟cluster测试相关的接口 + * Encapsulates the interfaces used for simulated cluster testing */ class PeerCluster { public: @@ -119,48 +119,48 @@ class PeerCluster { int StartFakeTopoloyService(const std::string &listenAddr); /** - * 启动一个 Peer + * Start a Peer * @param peer - * @param empty初始化配置是否为空 - * @return 0,成功;-1,失败 + * @param empty whether the initial configuration is empty + * @return 0 on success; -1 on failure */ int StartPeer(const Peer &peer, int id, const bool empty = false); /** - * 关闭一个peer,使用SIGINT + * Shut down a peer with SIGINT * @param peer - * @return 0 成功;-1 失败 + * @return 0 on success; -1 on failure */ int ShutdownPeer(const Peer &peer); /** - * hang住一个peer,使用SIGSTOP + * Hang a peer with SIGSTOP * @param peer - * @return 0成功;-1失败 + * @return 0 on success; -1 on failure */ int HangPeer(const Peer &peer); /** - * 恢复hang住的peer,使用SIGCONT + * Resume a hung peer with SIGCONT * @param peer - * @return 0:成功,-1 失败 + * @return 0 on success; -1 on failure */ int SignalPeer(const Peer &peer); /** - * 反复重试直到等到新的leader产生 - * @param leaderPeer出参,返回leader info - * @return 0,成功;-1 失败 + * Retry repeatedly until a new leader is elected + * @param leaderPeer output parameter, returns the leader info + * @return 0 on success; -1 on failure */ int WaitLeader(Peer *leaderPeer); /** - * confirm leader + * confirm leader * @param: LogicPoolID logicalPool id * @param: copysetId copyset id * @param: leaderAddr leader address - * @param: leader leader info - * @return 0,成功;-1 失败 + * @param: leader leader information + * @return 0 on success; -1 on failure */ int ConfirmLeader(const LogicPoolID &logicPoolId, const CopysetID &copysetId, @@ -169,13 +169,13 @@ class PeerCluster { /** - * Stop所有的peer - * @return 0,成功;-1 失败 + * Stop all peers + * @return 0 on success; -1 on failure */ int StopAllPeers(); public: - /* 返回集群当前的配置 */ + /* Returns the current configuration of the cluster */ Configuration CopysetConf() const; LogicPoolID GetLogicPoolId() const {return logicPoolID_;} @@ -184,7 +184,7 @@ class PeerCluster { void SetWorkingCopyset(CopysetID copysetID) {copysetID_ = copysetID;} - /* 修改 PeerNode 配置相关的接口,单位: s */ + /* Interfaces for modifying the PeerNode configuration, unit: s */ int SetsnapshotIntervalS(int snapshotIntervalS); int SetElectionTimeoutMs(int electionTimeoutMs); @@ -198,12 +198,12 @@ class PeerCluster { public: /** - * 返回执行peer的copyset路径with protocol, ex: local://./127.0.0.1:9101:0 + * Returns the copyset path of the peer, with protocol, ex: local://./127.0.0.1:9101:0 */ static const std::string CopysetDirWithProtocol(const Peer &peer); /** - * 返回执行peer的copyset路径without protocol, ex: ./127.0.0.1:9101:0 + * Returns the copyset path of the peer, without protocol, ex: 
./127.0.0.1:9101:0 */ static const std::string CopysetDirWithoutProtocol(const Peer &peer); @@ -222,32 +222,32 @@ class PeerCluster { const std::vector& peers); private: - // 集群名字 + // Cluster Name std::string clusterName_; - // 集群的peer集合 + // The peer set of the cluster std::vector peers_; - // peer集合的映射map + // Mapping Map of Peer Set std::unordered_map> peersMap_; - // 快照间隔 + // Snapshot interval int snapshotIntervalS_; - // 选举超时时间 + // Election timeout int electionTimeoutMs_; - // 集群成员配置 + // Cluster member configuration Configuration conf_; - // 逻辑池id + // Logical Pool ID LogicPoolID logicPoolID_; - // 复制组id + // Copy Group ID CopysetID copysetID_; // chunkserver id static ChunkServerID chunkServerId_; - // 文件系统适配层 + // File System Adaptation Layer static std::shared_ptr fs_; - // chunkserver启动传入参数的映射关系(chunkserver id: params_'s index) + // chunkserver starts the mapping relationship of incoming parameters (chunkserver id: params_'s index) std::map paramsIndexs_; - // chunkserver启动需要传递的参数列表 + // List of parameters to be passed for chunkserver startup std::vector params_; // fake mds server @@ -259,15 +259,15 @@ class PeerCluster { }; /** - * 正常 I/O 验证,先写进去,再读出来验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, write it in first, then read it out for verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 - * @param sn 本次写入的版本号 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO + * @param sn The version number written this time */ void WriteThenReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -279,14 +279,14 @@ void WriteThenReadVerify(Peer leaderPeer, uint64_t sn = 1); /** - * 正常 I/O 验证,read 数据验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Normal I/O verification, read data verification + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -297,14 +297,14 @@ void ReadVerify(Peer leaderPeer, int loop); /** - * 读chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify by reading the snapshot of the chunk + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -315,12 +315,12 @@ void ReadSnapshotVerify(Peer leaderPeer, int loop); /** - * 删除chunk的snapshot进行验证 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id - * @param chunkId chunk id - * @param csn corrected sn + *Delete snapshot of chunk for verification + * @param leaderId Primary ID + * @param logicPoolId 
Logical Pool ID + * @param copysetId Copy Group ID + * @param chunkId chunk id + * @param csn corrected sn */ void DeleteSnapshotVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -329,14 +329,14 @@ void DeleteSnapshotVerify(Peer leaderPeer, uint64_t csn); /** - * 异常I/O验证,read数据不符合预期 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + *Abnormal I/O verification, read data does not meet expectations + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadNotVerify(Peer leaderPeer, LogicPoolID logicPoolId, @@ -347,14 +347,14 @@ void ReadNotVerify(Peer leaderPeer, int loop); /** - * 通过read验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through read + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void ReadVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, @@ -365,14 +365,14 @@ void ReadVerifyNotAvailable(Peer leaderPeer, int loop); /** - * 通过write验证可用性 - * @param leaderId 主的 id - * @param logicPoolId 逻辑池 id - * @param copysetId 复制组 id + * Verify availability through write + * @param leaderId Primary ID + * @param logicPoolId Logical Pool ID + * @param copysetId Copy Group ID * @param chunkId chunk id - * @param length 每次 IO 的 length - * @param fillCh 每次 IO 填充的字符 - * @param loop 重复发起 IO 的次数 + * @param length The length of each IO + * @param fillCh Characters filled in each IO + * @param loop The number of times repeatedly initiates IO */ void WriteVerifyNotAvailable(Peer leaderPeer, LogicPoolID logicPoolId, @@ -383,10 +383,10 @@ void WriteVerifyNotAvailable(Peer leaderPeer, int loop); /** - * 验证几个副本的copyset status是否一致 - * @param peerIds: 待验证的peers - * @param logicPoolID: 逻辑池id - * @param copysetId: 复制组id + * Verify if the copyset status of several replicas is consistent + * @param peerIds: Peers to be verified + * @param logicPoolID: Logical Pool ID + * @param copysetId: Copy group ID */ void CopysetStatusVerify(const std::vector &peers, LogicPoolID logicPoolID, @@ -394,10 +394,10 @@ void CopysetStatusVerify(const std::vector &peers, uint64_t expectEpoch = 0); /** - * transfer leader,并且预期能够成功 - * @param cluster: 集群的指针 - * @param targetLeader: 期望tranfer的目标节点 - * @param opt: tranfer 请求使用的 clioption + * transfer leader and expected to succeed + * @param cluster: Pointer to the cluster + * @param targetLeader: The target node for the expected transfer + * @param opt: The clioption used in the transfer request */ void TransferLeaderAssertSuccess(PeerCluster *cluster, const Peer &targetLeader, diff --git a/test/integration/heartbeat/common.cpp b/test/integration/heartbeat/common.cpp index 5d09293287..5a9d4ca62f 100644 --- a/test/integration/heartbeat/common.cpp +++ b/test/integration/heartbeat/common.cpp @@ -320,7 +320,7 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { auto topologyServiceManager = std::make_shared( topology_, topologyStat_, 
nullptr, copysetManager, nullptr); - // 初始化basic集群 + // Initialize basic cluster PrepareBasicCluseter(); // init coordinator @@ -341,7 +341,7 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { heartbeatManager_->Init(); heartbeatManager_->Run(); - // 启动心跳rpc + // Start heartbeat rpc listenAddr_ = conf_.GetStringValue("mds.listen.addr"); heartbeatService_ = std::make_shared(heartbeatManager_); diff --git a/test/integration/heartbeat/common.h b/test/integration/heartbeat/common.h index b281d5a9ab..63214a93a0 100644 --- a/test/integration/heartbeat/common.h +++ b/test/integration/heartbeat/common.h @@ -227,140 +227,140 @@ class FakeTopologyStorage : public TopologyStorage { class HeartbeatIntegrationCommon { public: - /* HeartbeatIntegrationCommon 构造函数 + /* HeartbeatIntegrationCommon constructor * - * @param[in] conf 配置信息 + * @param[in] conf configuration information */ explicit HeartbeatIntegrationCommon(const Configuration &conf) { conf_ = conf; } - /* PrepareAddPoolset 在集群中添加物理池集合 + /* PrepareAddPoolset adds a physical pool collection to the cluster * - * @param[in] poolset 物理池集合(池组) + * @param[in] poolset Physical pool set (pool group) */ void PrepareAddPoolset(const Poolset &poolset); - /* PrepareAddLogicalPool 在集群中添加逻辑池 + /* PrepareAddLogicalPool Adding a Logical Pool to a Cluster * - * @param[in] lpool 逻辑池 + * @param[in] lpool logical pool */ void PrepareAddLogicalPool(const LogicalPool &lpool); - /* PrepareAddPhysicalPool 在集群中添加物理池 + /* PrepareAddPhysicalPool Adding a Physical Pool to a Cluster * - * @param[in] ppool 物理池 + * @param[in] ppool physical pool */ void PrepareAddPhysicalPool(const PhysicalPool &ppool); - /* PrepareAddZone 在集群中添加zone + /* PrepareAddZone adds a zone to the cluster * * @param[in] zone */ void PrepareAddZone(const Zone &zone); - /* PrepareAddServer 在集群中添加server + /* PrepareAddServer Adding a server to a Cluster * * @param[in] server */ void PrepareAddServer(const Server &server); - /* PrepareAddChunkServer 在集群中添加chunkserver节点 + /* PrepareAddChunkServer adds chunkserver nodes to the cluster * * @param[in] chunkserver */ void PrepareAddChunkServer(const ChunkServer &chunkserver); - /* PrepareAddCopySet 在集群中添加copyset + /* PrepareAddCopySet Adding a copyset to a cluster * - * @param[in] copysetId copyset id - * @param[in] logicalPoolId 逻辑池id - * @param[in] members copyset成员 + * @param[in] copysetId copyset ID + * @param[in] logicalPoolId Logical Pool ID + * @param[in] members copyset members */ void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, const std::set &members); - /* UpdateCopysetTopo 更新topology中copyset的状态 + /* UpdateCopysetTopo updates the status of copyset in topology * - * @param[in] copysetId copyset的id - * @param[in] logicalPoolId 逻辑池id - * @param[in] epoch copyset的epoch - * @param[in] leader copyset的leader - * @param[in] members copyset的成员 - * @param[in] candidate copyset的candidate信息 + * @param[in] copysetId The ID of the copyset + * @param[in] logicalPoolId Logical Pool ID + * @param[in] epoch epoch of copyset + * @param[in] leader copyset's leader + * @param[in] members members of copyset + * @param[in] candidate copyset's candidate information */ void UpdateCopysetTopo(CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, ChunkServerIdType leader, const std::set &members, ChunkServerIdType candidate = UNINTIALIZE_ID); - /* SendHeartbeat 发送心跳 + /* SendHeartbeat sends a heartbeat * * @param[in] req - * @param[in] expectedFailed 为true表示希望发送成功,为false表示希望发送失败 + * @param[in] expectedFailed true: to indicate that 
the transmission is expected to succeed, false: to indicate that the transmission is expected to fail * @param[out] response */ void SendHeartbeat(const ChunkServerHeartbeatRequest &request, bool expectFailed, ChunkServerHeartbeatResponse *response); - /* BuildBasicChunkServerRequest 构建最基本的request + /* BuildBasicChunkServerRequest builds the most basic request * - * @param[in] id chunkserver的id - * @param[out] req 构造好的指定id的request + * @param[in] id chunkserver ID + * @param[out] req the constructed request for the specified id */ void BuildBasicChunkServerRequest(ChunkServerIdType id, ChunkServerHeartbeatRequest *req); - /* AddCopySetToRequest 向request中添加copyset + /* AddCopySetToRequest adds a copyset to the request * * @param[in] req - * @param[in] csInfo copyset信息 - * @param[in] type copyset当前变更类型 + * @param[in] csInfo copyset information + * @param[in] type current config change type of the copyset */ void AddCopySetToRequest(ChunkServerHeartbeatRequest *req, const CopySetInfo &csInfo, ConfigChangeType type = ConfigChangeType::NONE); - /* AddOperatorToOpController 向调度模块添加op + /* AddOperatorToOpController adds an op to the scheduling module * * @param[in] op */ void AddOperatorToOpController(const Operator &op); - /* RemoveOperatorFromOpController 从调度模块移除指定copyset上的op + /* RemoveOperatorFromOpController removes the op on the specified copyset from the scheduling module * - * @param[in] id 需要移除op的copysetId + * @param[in] id copysetId whose op needs to be removed */ void RemoveOperatorFromOpController(const CopySetKey &id); /* - * PrepareBasicCluseter 在topology中构建最基本的拓扑结构 - * 一个物理池,一个逻辑池,三个zone,每个zone一个chunkserver, - * 集群中有一个copyset + * PrepareBasicCluseter builds the most basic topology structure in topology: + * one physical pool, one logical pool, three zones, one chunkserver per zone, + * and one copyset in the cluster */ void PrepareBasicCluseter(); /** - * InitHeartbeatOption 初始化heartbeatOption + * InitHeartbeatOption initializes heartbeatOption * - * @param[in] conf 配置模块 - * @param[out] heartbeatOption 赋值完成的心跳option + * @param[in] conf configuration module + * @param[out] heartbeatOption the populated heartbeat option */ void InitHeartbeatOption(Configuration *conf, HeartbeatOption *heartbeatOption); /** - * InitSchedulerOption 初始化scheduleOption + * InitSchedulerOption initializes scheduleOption * - * @param[in] conf 配置模块 - * @param[out] heartbeatOption 赋值完成的调度option + * @param[in] conf configuration module + * @param[out] heartbeatOption the populated schedule option */ void InitSchedulerOption(Configuration *conf, ScheduleOption *scheduleOption); /** - * BuildBasicCluster 运行heartbeat/topology/scheduler模块 + * BuildBasicCluster runs the heartbeat/topology/scheduler module */ void BuildBasicCluster(); diff --git a/test/integration/heartbeat/heartbeat_basic_test.cpp b/test/integration/heartbeat/heartbeat_basic_test.cpp index c9a2ae416d..a383d043f2 100644 --- a/test/integration/heartbeat/heartbeat_basic_test.cpp +++ b/test/integration/heartbeat/heartbeat_basic_test.cpp @@ -35,16 +35,16 @@ class HeartbeatBasicTest : public ::testing::Test { void InitConfiguration(Configuration *conf) { conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0); - // heartbeat相关配置设置 + // heartbeat related configuration settings conf->SetIntValue("mds.heartbeat.intervalMs", 100); conf->SetIntValue("mds.heartbeat.misstimeoutMs", 300); conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 500); conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", 0); - // mds监听端口号 + // MDS listening port number 
conf->SetStringValue("mds.listen.addr", "127.0.0.1:6879"); - // scheduler相关的内容 + // Schedule related content conf->SetBoolValue("mds.enable.copyset.scheduler", false); conf->SetBoolValue("mds.enable.leader.scheduler", false); conf->SetBoolValue("mds.enable.recover.scheduler", false); @@ -65,13 +65,13 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithCandidateOpOnGoing() { - // 构造mds中copyset当前状 + // Construct the current state of copyset in mds ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, std::set{ 1, 2, 3 }, 10); - // 构造scheduler当前的状态 + // Construct the current state of the scheduler Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(10)); @@ -80,14 +80,14 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsNoCnandidateOpOnGoing() { - // 构造mds中copyset当前状态 + // Construct the current state of copyset in mds // copyset-1(epoch=5, peers={1,2,3}, leader=1); ChunkServer cs(10, "testtoekn", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, std::set{ 1, 2, 3 }); - // 构造scheduler当前的状态 + // Construct the current state of the scheduler Operator op(5, CopySetKey{ 1, 1 }, OperatorPriority::NormalPriority, std::chrono::steady_clock::now(), std::make_shared(10)); @@ -96,8 +96,8 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1); + // There is an operator in copyset-1 in the scheduler: startEpoch=5, step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, @@ -111,8 +111,8 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithRemoveOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3,4}, leader=1, , candidate=4); - // scheduler中copyset-1有operator: startEpoch=5, step=RemovePeer<4> + // mds has copyset-1(epoch=5, peers={1,2,3,4}, leader=1, , candidate=4); + // There is an operator in copyset-1 in the scheduler: startEpoch=5, step=RemovePeer<4> ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo( @@ -126,8 +126,8 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // Copyset-1 in the scheduler has operator:startEpoch=5,step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, std::set{ 1, 2, 3 }); @@ -139,8 +139,8 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithTransferOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); - // scheduler中copyset-1有operator:startEpoch=5,step=TransferLeader{1>2} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=2); + // Copyset-1 in the scheduler has operator:startEpoch=5,step=TransferLeader{1>2} hbtest_->UpdateCopysetTopo(1, 1, 5, 1, std::set{ 1, 2, 3 }, 2); @@ -152,7 +152,7 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrePareMdsWithCandidateNoOp() { - // 
mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, @@ -160,8 +160,8 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithChangeOp() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1); - // scheduler中copyset-1有operator:startEpoch=5,step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1); + // Copyset-1 in the scheduler has operator:startEpoch=5,step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, @@ -175,8 +175,8 @@ class HeartbeatBasicTest : public ::testing::Test { } void PrepareMdsWithChangeOpOnGoing() { - // mds存在copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); - // scheduler中copyset-1有operator:startEpoch=5,step=step=ChangePeer{3>4} + // mds has copyset-1(epoch=5, peers={1,2,3}, leader=1, candidate=4); + // In the scheduler, copyset-1 has operator:startEpoch=5,step=step=ChangePeer{3>4} ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); hbtest_->UpdateCopysetTopo(1, 1, 5, 1, @@ -255,14 +255,14 @@ class HeartbeatBasicTest : public ::testing::Test { }; TEST_F(HeartbeatBasicTest, test_request_no_chunkserverID) { - // 空的HeartbeatRequest + // Empty HeartbeatRequest ChunkServerHeartbeatRequest req; ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBFAIL, &rep); } TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) { - // mds不存在该chunkserver + // The chunkserver does not exist in the mds ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(3, &req); req.set_chunkserverid(4); @@ -273,8 +273,8 @@ TEST_F(HeartbeatBasicTest, test_mds_donnot_has_this_chunkserver) { } TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { - // chunkserver上报的id相同,ip和port不匹配 - // ip不匹配 + // The id reported by chunkserver is the same, but the IP and port do not match + // IP mismatch ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(3, &req); req.set_ip("127.0.0.1"); @@ -283,14 +283,14 @@ TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch, rep.statuscode()); - // port不匹配 + // Port mismatch req.set_ip("10.198.100.3"); req.set_port(1111); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); ASSERT_EQ(::curve::mds::heartbeat::hbChunkserverIpPortNotMatch, rep.statuscode()); - // token不匹配 + // Token mismatch req.set_ip("10.198.100.3"); req.set_port(9000); req.set_token("youdao"); @@ -300,20 +300,20 @@ TEST_F(HeartbeatBasicTest, test_chunkserver_ip_port_not_match) { } TEST_F(HeartbeatBasicTest, test_chunkserver_offline_then_online) { - // chunkserver上报心跳时间间隔大于offline - // sleep 800ms, 该chunkserver onffline状态 + // Chunkserver reports that the heartbeat time interval is greater than offline + // Sleep 800ms, the chunkserver onffline status std::this_thread::sleep_for(std::chrono::milliseconds(800)); ChunkServer out; hbtest_->topology_->GetChunkServer(1, &out); ASSERT_EQ(OnlineState::OFFLINE, out.GetOnlineState()); - // chunkserver上报心跳,chunkserver online + // Chunkserver reports heartbeat, chunkserver online ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(out.GetId(), &req); ChunkServerHeartbeatResponse rep; 
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 后台健康检查程序把chunksrver更新为onlinne状态 + // The backend health check program updates chunksrver to online status uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool updateSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 2) { @@ -528,7 +528,7 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition9) { ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // 上报copyset-1(epoch=2, peers={1,2,3,4}, leader=1) + // Report copyset-1(epoch=2, peers={1,2,3,4}, leader=1) auto copysetMembers = copysetInfo.GetCopySetMembers(); copysetMembers.emplace(4); CopySetInfo csInfo(1, 1); @@ -573,13 +573,13 @@ TEST_F(HeartbeatBasicTest, test_copysets_in_mds_initial_state_condition10) { ASSERT_EQ(0, rep.needupdatecopysets_size()); } -// 上报的是leader +// Reported as the leader TEST_F(HeartbeatBasicTest, test_leader_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver1上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -598,11 +598,11 @@ TEST_F(HeartbeatBasicTest, test_leader_report_consistent_with_mds) { } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -611,7 +611,7 @@ TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5 + // response is empty, mds updates epoch to 5 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( @@ -623,11 +623,11 @@ TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger) { } TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger_leader_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=2) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -636,7 +636,7 @@ TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger_leader_not_same) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空,mds更新epoch为5,leader为2 + // response is empty, mds updates epoch to 5, and leader to 2 ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( @@ -647,13 +647,13 @@ TEST_F(HeartbeatBasicTest, test_leader_report_epoch_bigger_leader_not_same) { ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// Reported as a follower 
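The follower-report cases below reuse the same test recipe as the leader-report cases above. A sketch of that shared skeleton, for orientation (the csInfo setters are elided here just as in the surrounding hunks, and the element type of the peer set is assumed to be ChunkServerIdType):

    // Seed mds state: copyset-1(epoch=2, peers={1,2,3}, leader=1).
    hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set<ChunkServerIdType>{ 1, 2, 3 });

    // Build the heartbeat of the reporting chunkserver and attach its view of the copyset.
    ChunkServerHeartbeatRequest req;
    hbtest_->BuildBasicChunkServerRequest(2, &req);
    CopySetInfo csInfo(1, 1);
    // ... populate csInfo's epoch / leader / peers here ...
    hbtest_->AddCopySetToRequest(&req, csInfo);

    // Send it and assert on what mds asks to be corrected.
    ChunkServerHeartbeatResponse rep;
    hbtest_->SendHeartbeat(req, SENDHBOK, &rep);
    ASSERT_EQ(0, rep.needupdatecopysets_size());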
TEST_F(HeartbeatBasicTest, test_follower_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -662,7 +662,7 @@ TEST_F(HeartbeatBasicTest, test_follower_report_consistent_with_mds) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( @@ -673,13 +673,13 @@ TEST_F(HeartbeatBasicTest, test_follower_report_consistent_with_mds) { ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的是follower +// Reported as a follower TEST_F(HeartbeatBasicTest, test_follower_report_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=2, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=2, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -688,7 +688,7 @@ TEST_F(HeartbeatBasicTest, test_follower_report_leader_0) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // response为空 + // response is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( @@ -700,11 +700,11 @@ TEST_F(HeartbeatBasicTest, test_follower_report_leader_0) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -723,11 +723,11 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=3, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -746,13 +746,13 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_leader_0) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=3, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=3, 
peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -771,11 +771,11 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_bigger_peers_not_same) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -794,11 +794,11 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -817,12 +817,12 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_leader_0) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -841,12 +841,12 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same1) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=1, peers={1,2,3,4}, leader=0) + // chunkserver2 report copyset-1(epoch=1, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -865,11 +865,11 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_smaller_peers_not_same2) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -888,10 +888,10 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, 
peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -910,12 +910,12 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_leader_0) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same1) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -934,12 +934,12 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same1) { } TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same2) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -957,14 +957,14 @@ TEST_F(HeartbeatBasicTest, test_follower_report_epoch_0_peers_not_same2) { ASSERT_EQ(res, copysetInfo.GetCopySetMembers()); } -// 上报的不是复制组成员 +// The reported member is not a replication group member TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // chunkserver4 report copyset-1(epoch=2, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -972,7 +972,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -989,12 +989,12 @@ TEST_F(HeartbeatBasicTest, test_other_report_consistent_with_mds) { } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2,3}, leader=1) + // chunkserver4 report 
copyset-1(epoch=1, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -1002,7 +1002,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1019,12 +1019,12 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller) { } TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=1, peers={1,2}, leader=1) + // chunkserver4 report copyset-1(epoch=1, peers={1,2}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -1032,7 +1032,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1049,13 +1049,13 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_smaller_peers_not_same) { } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -1063,7 +1063,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1080,7 +1080,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0) { } TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { - // 更新topology中的copyset-1(epoch=2, peers={1,2,3}, leader=1) + // Update copyset-1(epoch=2, peers={1,2,3}, leader=1) in topology hbtest_->UpdateCopysetTopo(1, 1, 2, 1, std::set{ 1, 2, 3 }); ChunkServer cs4(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); @@ -1088,7 +1088,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { ChunkServer cs5(5, "testtoken", "nvme", 3, "10.198.100.3", 9090, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver4 report copyset-1(epoch=0, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo 
csInfo(1, 1); @@ -1096,7 +1096,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1115,7 +1115,7 @@ TEST_F(HeartbeatBasicTest, test_other_report_epoch_0_leader_0_peers_not_same) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -1123,10 +1123,10 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1140,7 +1140,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=5, peers={1,2,3}, leader=1) // conf.gChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -1149,9 +1149,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { hbtest_->AddCopySetToRequest(&req, csInfo, ConfigChangeType::ADD_PEER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1165,7 +1165,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition2) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition3) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver1 report copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -1173,12 +1173,12 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition3) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1191,7 +1191,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition4) { 
PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1199,12 +1199,12 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition4) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1217,7 +1217,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition4) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition5) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver1上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver1 report copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -1225,12 +1225,12 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition5) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1243,7 +1243,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition5) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition6) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1252,14 +1252,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition7) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1268,9 +1268,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1283,7 +1283,7 @@ TEST_F(HeartbeatBasicTest, 
test_mdsNoCandidate_OpOnGoing_condition8) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1292,9 +1292,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -1307,7 +1307,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition8) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition9) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1316,9 +1316,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -1331,7 +1331,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition9) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition10) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1340,9 +1340,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -1355,7 +1355,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition10) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition11) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 report copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1364,9 +1364,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition11) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -1379,7 +1379,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition11) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition12) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, 
peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1388,14 +1388,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition12) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition13) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1404,7 +1404,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition13) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1413,7 +1413,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition14) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1422,7 +1422,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition14) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1431,7 +1431,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition15) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 report copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1440,14 +1440,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition15) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition16) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1456,14 +1456,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition16) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition17) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1472,7 +1472,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition17) { ChunkServerHeartbeatResponse rep; 
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1481,7 +1481,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition18) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1490,7 +1490,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition18) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1499,7 +1499,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition19) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver2 report copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1508,14 +1508,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition19) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition20) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 report copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1524,14 +1524,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition20) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition21) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 report copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1540,14 +1540,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition21) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition22) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 report copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1556,9 +1556,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition22) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -1571,7 +1571,7 @@ 
TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition22) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition23) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 report copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1580,9 +1580,9 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition23) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology + // Check topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -1595,7 +1595,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition23) { TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition24) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 report copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1604,14 +1604,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition24) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition25) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 report copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1620,7 +1620,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition25) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1629,7 +1629,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition26) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 report copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1638,7 +1638,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition26) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1647,7 +1647,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition27) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 report copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1656,14 +1656,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition27) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition28) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 report copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1672,14 +1672,14 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition28) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition29) { PrepareMdsNoCnandidateOpOnGoing(); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1688,7 +1688,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition29) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1697,7 +1697,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver10 report copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1706,7 +1706,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition30) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1715,7 +1715,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition31) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报的copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver10 report copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -1724,7 +1724,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition31) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } @@ -1733,7 +1733,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition32) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 report copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -1742,7 +1742,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition32) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1756,7 +1756,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition33) { ChunkServer cs(4, "testtoekn", 
"nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 report copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -1765,7 +1765,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition33) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1781,7 +1781,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition34) { ChunkServer cs5(5, "testtoekn", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs5); - // chunkserver4上报的copyset-1(epoch=4, peers={1,2,3,5}, leader=0) + // chunkserver4 report copyset-1(epoch=4, peers={1,2,3,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -1790,7 +1790,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition34) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1804,7 +1804,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报的copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 report copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -1813,7 +1813,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -1825,7 +1825,7 @@ TEST_F(HeartbeatBasicTest, test_mdsNoCandidate_OpOnGoing_condition35) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -1834,7 +1834,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(ConfigChangeType::ADD_PEER, rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.3:9001:0", @@ -1844,7 +1844,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition1) { TEST_F(HeartbeatBasicTest, test_test_mdsWithCandidate_OpOnGoing_condition2) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报 + // chunkserver1 reporting // copyset-1(epoch=5, peers={1,2,3}, leader=1, // conf.gChangeInfo={peer: 10, type: AddPeer} ) ChunkServerHeartbeatRequest req; @@ -1855,14 +1855,14 @@ TEST_F(HeartbeatBasicTest, test_test_mdsWithCandidate_OpOnGoing_condition2) { ChunkServerHeartbeatResponse rep; 
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1871,12 +1871,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1888,7 +1888,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition3) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition4) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver1上报copyset-1(epoch=6, peers={1,2,3,10}, leader=2) + // chunkserver1 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -1897,12 +1897,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1916,7 +1916,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition4) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition5) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=7, peers={1,2,3, 10}, leader=2) + // chunkserver2 reports copyset-1(epoch=7, peers={1,2,3, 10}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1925,12 +1925,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查topology中copyset的状态 + // Check the status of copyset in topology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -1944,7 +1944,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition5) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition6) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) 
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1953,17 +1953,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition7) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1972,17 +1972,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition8) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -1991,12 +1991,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2010,7 +2010,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition8) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition9) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver2 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2019,12 +2019,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2038,7 +2038,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition9) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition10) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, 
peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2047,12 +2047,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2066,7 +2066,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition10) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition11) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2075,12 +2075,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition11) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2096,7 +2096,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition12) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2105,12 +2105,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition12) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2126,7 +2126,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition13) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2135,12 +2135,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition13) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check 
response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2154,7 +2154,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition13) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition14) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2163,12 +2163,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition14) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2182,7 +2182,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition14) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition15) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2191,12 +2191,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition15) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2212,7 +2212,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition16) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2221,12 +2221,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition16) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2242,7 +2242,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition17) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // 
chunkserver2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2251,12 +2251,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition17) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2270,7 +2270,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition17) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition18) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2279,12 +2279,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition18) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2298,7 +2298,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition18) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition19) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2307,12 +2307,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition19) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2326,7 +2326,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition19) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition20) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=1) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2335,12 +2335,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition20) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2354,7 +2354,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition20) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition21) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=6, peers={1,2,3,10}, leader=0) + // chunkserver10 reports copyset-1(epoch=6, peers={1,2,3,10}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2363,12 +2363,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition21) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2382,7 +2382,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition21) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition22) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2391,12 +2391,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition22) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2410,7 +2410,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition22) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition23) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2419,12 +2419,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition23) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2440,7 +2440,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition24) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 
9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2449,12 +2449,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition24) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -2470,7 +2470,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition25) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver10上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver10 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); CopySetInfo csInfo(1, 1); @@ -2479,12 +2479,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition25) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -2498,7 +2498,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition25) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition26) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); @@ -2508,12 +2508,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition26) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in sheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, ©setInfo)); @@ -2527,7 +2527,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition26) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition27) { PrepareMdsWithCandidateOpOnGoing(); - // chunkserver10上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver10 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(10, &req); hbtest_->BuildBasicChunkServerRequest(10, &req); @@ -2537,12 +2537,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition27) { ChunkServerHeartbeatResponse rep; 
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2558,7 +2558,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -2567,10 +2567,10 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2580,7 +2580,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition28) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2596,7 +2596,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -2605,10 +2605,10 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2618,7 +2618,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition29) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2634,7 +2634,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { ChunkServer cs(4, "testtoekn", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo 
csInfo(1, 1); @@ -2643,10 +2643,10 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查sheduler中的operator + // Check the operator in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2656,7 +2656,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { rep.needupdatecopysets(0).peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", rep.needupdatecopysets(0).peers(2).address()); - // 检查copyset + // Check copyset ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE( hbtest_->topology_->GetCopySet(CopySetKey{ 1, 1 }, &copysetInfo)); @@ -2670,7 +2670,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidate_OpOnGoing_condition30) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -2679,7 +2679,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -2694,14 +2694,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_1) { ASSERT_EQ(ConfigChangeType::REMOVE_PEER, rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.3:9001:0", rep.needupdatecopysets(0).configchangeitem().address()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_2) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1, // cofigChangeInfo={peer: 4, type:REMOVE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -2712,16 +2712,16 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_2) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -2730,12 +2730,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2743,7 +2743,7 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithRemoveOp_4) { PrepareMdsWithRemoveOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -2752,13 +2752,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2766,7 +2766,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_4) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2775,14 +2775,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(7); csInfo.SetLeader(2); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -2790,7 +2790,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2799,11 +2799,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -2814,7 +2814,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2823,12 +2823,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = 
dynamic_cast(ops[0].step.get()); @@ -2839,7 +2839,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2848,12 +2848,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -2864,7 +2864,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2873,13 +2873,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -2890,7 +2890,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2899,13 +2899,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -2916,7 +2916,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2925,14 +2925,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); 
csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -2943,7 +2943,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2952,31 +2952,31 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -2987,7 +2987,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports (epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -2996,32 +2996,32 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports (epoch=4, peers={1,2,3}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler
ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -3032,7 +3032,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3041,31 +3041,31 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -3076,7 +3076,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_14) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_15) { PrepareMdsWithRemoveOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3085,33 +3085,33 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_15) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, 
ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -3124,7 +3124,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -3134,7 +3134,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3145,10 +3145,10 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_16) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3161,7 +3161,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -3171,7 +3171,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3182,11 +3182,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_17) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3199,7 +3199,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -3209,7 +3209,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3220,11 +3220,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_18) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1);
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3237,7 +3237,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -3247,7 +3247,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -3258,12 +3258,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3273,7 +3273,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOp_19) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_1) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1, // configChangeInfo={peer: 4, type: REMOVE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -3284,11 +3284,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_1) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3298,7 +3298,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_1) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_2) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -3307,19 +3307,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_2) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(6); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_3) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports
copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3328,18 +3328,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_4) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3348,18 +3348,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_5) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3368,18 +3368,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_6) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3388,12 +3388,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3403,7 +3403,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_6) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_7) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3412,13 +3412,13 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithRemoveOpOnGoing_7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3428,7 +3428,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_7) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_8) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3437,14 +3437,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3454,7 +3454,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_8) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_9) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3463,15 +3463,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3481,7 +3481,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_9) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_10) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3490,14 +3490,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = 
dynamic_cast(ops[0].step.get()); @@ -3507,7 +3507,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_10) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_11) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3516,15 +3516,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_11) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3534,7 +3534,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_11) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_12) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3543,14 +3543,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_12) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3560,7 +3560,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_12) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_13) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3569,14 +3569,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_13) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3586,7 +3586,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_13) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_14) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3595,14 +3595,14 @@ 
TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_14) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3612,7 +3612,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_14) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_15) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3621,15 +3621,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_15) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3639,7 +3639,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_15) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_16) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3648,13 +3648,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_16) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3664,7 +3664,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_16) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_17) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3673,14 +3673,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_17) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = 
dynamic_cast(ops[0].step.get()); @@ -3690,7 +3690,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_17) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_18) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3699,14 +3699,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_18) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3716,7 +3716,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_18) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_19) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -3725,15 +3725,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_19) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3743,7 +3743,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_19) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_20) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3752,12 +3752,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_20) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3767,7 +3767,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_20) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_21) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3776,13 +3776,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_21) { 
ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3792,7 +3792,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_21) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_22) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3801,13 +3801,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_22) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3817,7 +3817,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_22) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_23) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3826,14 +3826,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_23) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3843,7 +3843,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_23) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_24) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3852,14 +3852,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_24) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3869,7 +3869,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_24) { 
TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_25) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3878,15 +3878,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_25) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3896,7 +3896,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_25) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_26) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3905,14 +3905,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_26) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3922,7 +3922,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_26) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_27) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3931,14 +3931,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_27) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3948,7 +3948,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_27) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_28) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3957,14 +3957,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_28) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); 
- // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -3974,7 +3974,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_28) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_29) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -3983,15 +3983,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_29) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4001,7 +4001,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_29) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_30) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -4010,13 +4010,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_30) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4026,7 +4026,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_30) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_31) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -4035,14 +4035,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_31) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4052,7 +4052,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_31) { TEST_F(HeartbeatBasicTest, 
test_mdsWithRemoveOpOnGoing_32) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -4061,14 +4061,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_32) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4078,7 +4078,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_32) { TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_33) { PrepareMdsWithRemoveOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -4087,15 +4087,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_33) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4108,7 +4108,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=5, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -4118,7 +4118,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4129,11 +4129,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_34) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4146,7 +4146,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=4, peers={1,2,3,4}, leader=0)
ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -4156,7 +4156,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4167,19 +4167,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=4, peers={1,2,3}, leader=0) rep.Clear(); req.Clear(); @@ -4187,7 +4187,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4198,13 +4198,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_35) { ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ("10.198.100.3:9001:0", conf.peers(3).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -4217,7 +4217,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=0, peers={1,2,3,4}, leader=0 ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -4227,26 +4227,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_36) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); ASSERT_EQ(1, conf.copysetid()); ASSERT_EQ(4, conf.peers_size()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // 非复制组成员chunkserver-5上报 + // chunkserver-5, which is not in the replication group, reports // copyset-1(epoch=0, peers={1,2,3}, leader=0) rep.Clear(); req.Clear(); @@ -4254,20 +4254,20
@@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_36) { BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); ASSERT_EQ(1, conf.copysetid()); ASSERT_EQ(4, conf.peers_size()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3, 4 }); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -4277,7 +4277,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithRemoveOpOnGoing_36) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) { PrepareMdsWithTransferOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -4285,7 +4285,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); ASSERT_EQ(1, rep.needupdatecopysets(0).copysetid()); ASSERT_EQ(5, rep.needupdatecopysets(0).epoch()); @@ -4299,9 +4299,9 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) { rep.needupdatecopysets(0).type()); ASSERT_EQ("10.198.100.2:9000:0", rep.needupdatecopysets(0).configchangeitem().address()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4311,7 +4311,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_1) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_2) { PrepareMdsWithTransferOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -4321,11 +4321,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_2) { ConfigChangeType::TRANSFER_LEADER); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4335,7 +4335,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_2) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_3) { PrepareMdsWithTransferOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -4343,18 +4343,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_3) { hbtest_->AddCopySetToRequest(&req, 
csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_4) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4362,18 +4362,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_4) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_5) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4381,11 +4381,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_5) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4395,7 +4395,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_6) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4403,12 +4403,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_6) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4418,7 +4418,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_7) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4426,12 +4426,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_7) { 
hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4441,7 +4441,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_8) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4449,13 +4449,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_8) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4465,7 +4465,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_9) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4473,19 +4473,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_9) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -4494,13 +4494,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_9) { BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -4510,7 +4510,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_10) { PrepareMdsWithTransferOp(); - // 
chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4518,20 +4518,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_10) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -4540,14 +4540,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_10) { BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -4557,7 +4557,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_11) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4565,19 +4565,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_11) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -4586,13 +4586,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_11) { BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + 
// Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -4602,7 +4602,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_12) { PrepareMdsWithTransferOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4610,20 +4610,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_12) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -4632,14 +4632,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_12) { BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -4651,7 +4651,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) { PrepareMdsWithTransferOp(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -4659,7 +4659,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4669,10 +4669,10 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4684,7 +4684,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) { PrepareMdsWithTransferOp(); ChunkServer 
cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -4692,7 +4692,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4702,11 +4702,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_14) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4720,7 +4720,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) { hbtest_->PrepareAddChunkServer(cs1); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -4728,7 +4728,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4738,12 +4738,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_15) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4758,7 +4758,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -4766,7 +4766,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4776,25 +4776,25 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); 
ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -4804,12 +4804,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -4819,7 +4819,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOp_16) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_1) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1, + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1, // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -4830,11 +4830,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_1) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4844,7 +4844,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_1) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_2) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -4853,18 +4853,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_2) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_3) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports 
copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4873,18 +4873,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_4) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4893,12 +4893,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4908,7 +4908,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_4) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_5) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4917,13 +4917,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4933,7 +4933,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_5) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_6) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4942,13 +4942,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -4958,7 +4958,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_6) { TEST_F(HeartbeatBasicTest, 
test_mdsWithTransferOpOnGoing_7) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -4966,20 +4966,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_7) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -4988,14 +4988,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_7) { BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -5005,7 +5005,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_7) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_8) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5013,21 +5013,21 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_8) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -5036,15 +5036,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_8) { BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, 
rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -5054,7 +5054,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_8) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_9) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5062,20 +5062,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_9) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -5084,14 +5084,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_9) { BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(2); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -5101,7 +5101,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_9) { TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_10) { PrepareMdsWithTransferOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5109,21 +5109,21 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_10) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, 
leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); rep.Clear(); @@ -5132,15 +5132,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_10) { BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -5152,7 +5152,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { PrepareMdsWithTransferOpOnGoing(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5160,7 +5160,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5170,11 +5170,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_11) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -5186,7 +5186,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { PrepareMdsWithTransferOpOnGoing(); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5194,7 +5194,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5204,12 +5204,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_12) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -5224,7 +5224,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -5232,7 +5232,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5242,26 +5242,26 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(2, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5271,13 +5271,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(2); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -5287,7 +5287,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithTransferOpOnGoing_13) { TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_1) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -5296,18 +5296,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_1) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } 
TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_2) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: ADD_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -5317,18 +5317,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_2) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_3) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -5337,18 +5337,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_4) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5358,18 +5358,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_5) { PrePareMdsWithCandidateNoOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -5378,18 +5378,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_6) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,3,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,3,4}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5398,18 +5398,18 @@ 
TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_7) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5418,19 +5418,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_8) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5439,20 +5439,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_9) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5462,20 +5462,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_10) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5485,21 +5485,21 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); 
csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_11) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5509,21 +5509,21 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_11) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_12) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5533,22 +5533,22 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_12) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_13) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5556,40 +5556,40 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_13) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); 
ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_14) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5597,40 +5597,40 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_14) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_15) { PrePareMdsWithCandidateNoOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5638,38 +5638,38 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_15) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_16) { PrePareMdsWithCandidateNoOp(); - 
// chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -5677,40 +5677,40 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_16) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_17) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5719,19 +5719,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_17) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_18) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5740,20 +5740,20 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_18) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_19) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo 
csInfo(1, 1); @@ -5762,21 +5762,21 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_19) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_20) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5785,22 +5785,22 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_20) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_21) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5809,21 +5809,21 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_21) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_22) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5833,22 +5833,22 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_22) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_23) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; 
hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5856,17 +5856,17 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_23) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1) ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); req.Clear(); @@ -5875,21 +5875,21 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_23) { BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_24) { PrePareMdsWithCandidateNoOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -5897,18 +5897,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_24) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); req.Clear(); @@ -5917,15 +5917,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_24) { BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -5934,7 +5934,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=5, peers={1,2,3}, 
leader=0) + // chunkserver-5 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -5943,7 +5943,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5953,11 +5953,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_25) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -5966,7 +5966,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -5976,7 +5976,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -5986,12 +5986,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_26) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6000,7 +6000,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -6010,7 +6010,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6020,13 +6020,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_27) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6035,7 +6035,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { PrePareMdsWithCandidateNoOp(); ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -6043,7 +6043,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6053,23 +6053,23 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6079,22 +6079,22 @@ TEST_F(HeartbeatBasicTest, test_mdsWithCandidateNoOp_28) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_1) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -6104,20 +6104,20 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_1) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_2) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update 
copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -6128,20 +6128,20 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_2) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_3) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -6150,20 +6150,20 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_4) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -6174,22 +6174,22 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_5) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -6198,22 +6198,22 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response 
ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_6) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) // configChangeInfo={peer: 2, type: TRANSFER_LEADER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -6225,20 +6225,20 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_7) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -6247,21 +6247,21 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_8) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -6270,20 +6270,20 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_9) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=6, 
peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -6291,38 +6291,38 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_9) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_10) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; @@ -6332,40 +6332,40 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_10) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_11) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + 
// chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -6373,38 +6373,38 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_11) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_12) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; @@ -6414,40 +6414,40 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_12) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_13) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports 
copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -6455,38 +6455,38 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_13) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_14) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; @@ -6496,40 +6496,40 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_14) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(2, &req); BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 4 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_15) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, 
peers={1,2,3}, leader=1) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; @@ -6540,7 +6540,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_15) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6550,19 +6550,19 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_15) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServer cs(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs); ChunkServerHeartbeatRequest req; @@ -6572,7 +6572,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6582,18 +6582,18 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_16) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_17) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; @@ -6603,17 +6603,17 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_17) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 
9002, "/"); @@ -6622,23 +6622,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_17) { BuildCopySetInfo(&csInfo, 6, 1, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_18) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; @@ -6648,16 +6648,16 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_18) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -6666,23 +6666,23 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_18) { BuildCopySetInfo(&csInfo, 6, 0, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; @@ -6693,7 +6693,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6703,15 +6703,15 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", 
conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -6720,7 +6720,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6730,21 +6730,21 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_19) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; @@ -6754,7 +6754,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6764,14 +6764,14 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -6780,7 +6780,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6790,21 +6790,21 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_20) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); 
ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; @@ -6814,7 +6814,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6824,15 +6824,15 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=1) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -6841,7 +6841,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { BuildCopySetInfo(&csInfo, 0, 1, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6851,21 +6851,21 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_21) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(0); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { - // 更新topology中的copyset-1(epoch=5, peers={1,2,3}, leader=0) + // Update copyset-1(epoch=5, peers={1,2,3}, leader=0) in topology hbtest_->UpdateCopysetTopo(1, 1, 5, 0, std::set{ 1, 2, 3 }); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServer cs1(4, "testtoken", "nvme", 3, "10.198.100.3", 9001, "/"); hbtest_->PrepareAddChunkServer(cs1); ChunkServerHeartbeatRequest req; @@ -6875,7 +6875,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; 
hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6885,14 +6885,14 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3,5}, leader=0) req.Clear(); rep.Clear(); ChunkServer cs2(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); @@ -6901,7 +6901,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { BuildCopySetInfo(&csInfo, 0, 0, std::set{ 1, 2, 3, 5 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6911,11 +6911,11 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -6923,7 +6923,7 @@ TEST_F(HeartbeatBasicTest, test_mdsCopysetNoLeader_22) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -6931,7 +6931,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -6944,11 +6944,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); ASSERT_EQ("10.198.100.3:9000:0", conf.oldpeer().address()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -6960,7 +6960,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_1) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: CHANGE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -6970,11 
+6970,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -6985,7 +6985,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -6994,11 +6994,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7006,7 +7006,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_4) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7015,11 +7015,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7029,7 +7029,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -7038,11 +7038,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7050,7 +7050,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_5) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { PrepareMdsWithChangeOp(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -7059,11 +7059,11 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithChangePeerOp_6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7071,7 +7071,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_6) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7080,11 +7080,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7092,7 +7092,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_7) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4) + // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7101,11 +7101,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -7113,7 +7113,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_8) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7122,11 +7122,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7137,7 +7137,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_9) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7146,12 +7146,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { ChunkServerHeartbeatResponse 
rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7162,7 +7162,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_10) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7171,12 +7171,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7187,7 +7187,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_11) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7196,13 +7196,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7213,7 +7213,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_12) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7222,13 +7222,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7239,7 +7239,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_13) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_14) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, 
leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7248,14 +7248,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_14) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7268,7 +7268,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_15) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7277,13 +7277,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_15) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7296,7 +7296,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7305,14 +7305,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7323,7 +7323,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_16) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7332,12 +7332,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = 
hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7348,7 +7348,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_17) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_18) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7357,13 +7357,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_18) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7376,7 +7376,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7385,13 +7385,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_19) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7404,7 +7404,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7413,14 +7413,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7431,7 +7431,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_20) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7440,12 +7440,12 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithChangePeerOp_21) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7456,7 +7456,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_21) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_22) { PrepareMdsWithChangeOp(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7465,13 +7465,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_22) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7484,7 +7484,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_23) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7493,13 +7493,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_23) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7512,7 +7512,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -7521,14 +7521,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7539,7 +7539,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_24) { TEST_F(HeartbeatBasicTest, 
test_mdsWithChangePeerOp_25) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7548,11 +7548,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7563,7 +7563,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_25) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7572,12 +7572,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7588,7 +7588,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_26) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7597,13 +7597,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7614,7 +7614,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_27) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7623,14 +7623,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in 
scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7641,7 +7641,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_28) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7650,12 +7650,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7666,7 +7666,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_29) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_30) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7675,13 +7675,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_30) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7694,7 +7694,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_31) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7703,13 +7703,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_31) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7722,7 +7722,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7731,14 +7731,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { 
ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7749,7 +7749,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_32) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7758,12 +7758,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7774,7 +7774,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_33) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_34) { PrepareMdsWithChangeOp(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7783,13 +7783,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_34) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7802,7 +7802,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_35) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7811,13 +7811,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_35) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7830,7 +7830,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); 
hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -7839,14 +7839,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_36) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7859,7 +7859,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -7868,7 +7868,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7878,11 +7878,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7895,7 +7895,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -7904,7 +7904,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7914,10 +7914,10 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7928,13 +7928,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { hbtest_->PrepareAddChunkServer(cs2); 
req.Clear(); rep.Clear(); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) hbtest_->BuildBasicChunkServerRequest(5, &req); BuildCopySetInfo(&csInfo, 4, 1, std::set{ 1, 2, 6 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7944,11 +7944,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -7961,7 +7961,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -7970,7 +7970,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -7980,11 +7980,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -7993,7 +7993,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -8001,7 +8001,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8011,12 +8011,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); 
ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -8029,7 +8029,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -8038,7 +8038,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8048,18 +8048,18 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -8067,7 +8067,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8077,12 +8077,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -8093,7 +8093,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOp_40) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -8101,7 +8101,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); auto conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -8114,11 +8114,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { ASSERT_EQ(ConfigChangeType::CHANGE_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); ASSERT_EQ("10.198.100.3:9000:0", 
conf.oldpeer().address()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8130,7 +8130,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_1) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) // configChangeInfo={peer: 4, type: CHANGE_PEER}) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); @@ -8140,11 +8140,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8155,7 +8155,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_2) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -8164,11 +8164,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8176,7 +8176,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_3) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_4) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=2) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8185,11 +8185,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_4) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8199,7 +8199,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -8208,11 +8208,11 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithChangePeerOpOnGoing_5) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8220,7 +8220,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_5) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-1上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-1 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(1, &req); CopySetInfo csInfo(1, 1); @@ -8229,11 +8229,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8241,7 +8241,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_6) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=7, peers={1,2,4}, leader=2) + // chunkserver-2 reports copyset-1(epoch=7, peers={1,2,4}, leader=2) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8250,11 +8250,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8262,7 +8262,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_7) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=7, peers={1,2,4}, leader=4) + // chunkserver-4 reports copyset-1(epoch=7, peers={1,2,4}, leader=4) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8271,11 +8271,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(0, ops.size()); } @@ -8283,7 +8283,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_8) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8292,12 +8292,12 @@ 
TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8308,7 +8308,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_9) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8317,13 +8317,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8334,7 +8334,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_10) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8343,13 +8343,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8360,7 +8360,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_11) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8369,14 +8369,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8387,7 +8387,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_12) 
{ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8396,14 +8396,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8414,7 +8414,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_13) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_14) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8423,15 +8423,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_14) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8444,7 +8444,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_15) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8453,14 +8453,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_15) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8473,7 +8473,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=6, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=6, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8482,15 +8482,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) { 
ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8501,7 +8501,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_16) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8510,13 +8510,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8527,7 +8527,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_17) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_18) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8536,14 +8536,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_18) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8556,7 +8556,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_19) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8565,14 +8565,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_19) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ 
-8585,7 +8585,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8594,15 +8594,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8613,7 +8613,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_20) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8622,13 +8622,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8639,7 +8639,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_21) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_22) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8648,14 +8648,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_22) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8668,7 +8668,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_23) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8677,14 +8677,14 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithChangePeerOpOnGoing_23) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8697,7 +8697,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-2上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-2 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(2, &req); CopySetInfo csInfo(1, 1); @@ -8706,15 +8706,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8725,7 +8725,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_24) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8734,12 +8734,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8750,7 +8750,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_25) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=5, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=5, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8759,13 +8759,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ 
-8776,7 +8776,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_26) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=1) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8785,14 +8785,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8803,7 +8803,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_27) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=6, peers={1,2,4}, leader=0) + // chunkserver-4 reports copyset-1(epoch=6, peers={1,2,4}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8812,15 +8812,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); csInfo.SetLeader(1); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8831,7 +8831,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_28) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8840,13 +8840,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8857,7 +8857,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_29) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_30) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8866,14 +8866,14 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithChangePeerOpOnGoing_30) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8886,7 +8886,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_31) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8895,14 +8895,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_31) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8915,7 +8915,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=4, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=4, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8924,15 +8924,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8943,7 +8943,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_32) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8952,13 +8952,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto 
step = dynamic_cast(ops[0].step.get()); @@ -8969,7 +8969,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_33) { TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_34) { PrepareMdsWithChangeOpOnGoing(); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -8978,14 +8978,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_34) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -8998,7 +8998,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_35) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=1) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -9007,14 +9007,14 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_35) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -9027,7 +9027,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_36) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-4上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-4 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(4, &req); CopySetInfo csInfo(1, 1); @@ -9036,15 +9036,15 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_36) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -9057,7 +9057,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,5}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,5}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo 
csInfo(1, 1); @@ -9066,7 +9066,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9076,12 +9076,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_37) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -9094,7 +9094,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -9103,7 +9103,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9113,12 +9113,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -9129,13 +9129,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { hbtest_->PrepareAddChunkServer(cs2); req.Clear(); rep.Clear(); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=0) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=0) hbtest_->BuildBasicChunkServerRequest(5, &req); BuildCopySetInfo(&csInfo, 4, 0, std::set{ 1, 2, 6 }); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9145,13 +9145,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_38) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -9164,7 +9164,7 @@ TEST_F(HeartbeatBasicTest, 
test_mdsWithChangePeerOpOnGoing_39) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,3}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,3}, leader=1) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -9173,7 +9173,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9183,11 +9183,11 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); @@ -9196,7 +9196,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ChunkServer cs2(6, "testtoken", "nvme", 3, "10.198.100.3", 9003, "/"); hbtest_->PrepareAddChunkServer(cs2); - // chunkserver-5上报copyset-1(epoch=4, peers={1,2,6}, leader=1) + // chunkserver-5 reports copyset-1(epoch=4, peers={1,2,6}, leader=1) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -9204,7 +9204,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9214,12 +9214,12 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_39) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get()); @@ -9232,7 +9232,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ChunkServer cs(5, "testtoken", "nvme", 3, "10.198.100.3", 9002, "/"); hbtest_->PrepareAddChunkServer(cs); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,3}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,3}, leader=0) ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(5, &req); CopySetInfo csInfo(1, 1); @@ -9241,7 +9241,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9251,19 +9251,19 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, 
conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler auto ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); auto step = dynamic_cast(ops[0].step.get()); ASSERT_TRUE(nullptr != step); ASSERT_EQ(4, step->GetTargetPeer()); - // chunkserver-5上报copyset-1(epoch=0, peers={1,2,4}, leader=0) + // chunkserver-5 reports copyset-1(epoch=0, peers={1,2,4}, leader=0) req.Clear(); rep.Clear(); hbtest_->BuildBasicChunkServerRequest(5, &req); @@ -9271,7 +9271,7 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response + // Check response ASSERT_EQ(1, rep.needupdatecopysets_size()); conf = rep.needupdatecopysets(0); ASSERT_EQ(1, conf.logicalpoolid()); @@ -9281,13 +9281,13 @@ TEST_F(HeartbeatBasicTest, test_mdsWithChangePeerOpOnGoing_40) { ASSERT_EQ("10.198.100.2:9000:0", conf.peers(1).address()); ASSERT_EQ("10.198.100.3:9000:0", conf.peers(2).address()); ASSERT_EQ(5, conf.epoch()); - // 检查copyset + // Check copyset csInfo.SetEpoch(5); csInfo.SetLeader(1); csInfo.SetCandidate(4); csInfo.SetCopySetMembers(std::set{ 1, 2, 3 }); ASSERT_TRUE(ValidateCopySet(csInfo)); - // 检查scheduler中的op + // Check op in scheduler ops = hbtest_->coordinator_->GetOpController()->GetOperators(); ASSERT_EQ(1, ops.size()); step = dynamic_cast(ops[0].step.get());
diff --git a/test/integration/heartbeat/heartbeat_exception_test.cpp b/test/integration/heartbeat/heartbeat_exception_test.cpp index 67ac0bcf01..c15ed38e58 100644 --- a/test/integration/heartbeat/heartbeat_exception_test.cpp +++ b/test/integration/heartbeat/heartbeat_exception_test.cpp @@ -34,16 +34,16 @@ class HeartbeatExceptionTest : public ::testing::Test { void InitConfiguration(Configuration *conf) { conf->SetIntValue("mds.topology.ChunkServerStateUpdateSec", 0); - // heartbeat相关配置设置 + // Heartbeat-related configuration conf->SetIntValue("mds.heartbeat.intervalMs", 100); conf->SetIntValue("mds.heartbeat.misstimeoutMs", 3000); conf->SetIntValue("mds.heartbeat.offlinetimeoutMs", 5000); conf->SetIntValue("mds.heartbeat.clean_follower_afterMs", sleepTimeMs_); - // mds监听端口号 + // MDS listening port conf->SetStringValue("mds.listen.addr", "127.0.0.1:6880"); - // scheduler相关的内容 + // Scheduler-related settings conf->SetBoolValue("mds.enable.copyset.scheduler", false); conf->SetBoolValue("mds.enable.leader.scheduler", false); conf->SetBoolValue("mds.enable.recover.scheduler", false); @@ -95,30 +95,30 @@ class HeartbeatExceptionTest : public ::testing::Test { }; /* - * bug说明:稳定性测试环境,宕机一台机器之后设置pending,副本恢复过程中mds有切换 - * 最终发现有5个pending状态的chunkserver没有完成迁移 - * 分析: - * 1. mds1提供服务时产生operator并下发给copyset-1{A,B,C} + - * D的变更,C是offline状态 - * 2. copyset-1完成配置变更,此时leader上的配置更新为epoch=2/{A,B,C,D}, - * candidate上的配置为epoch=1/{A,B,C}, mds1中记录的配置为epoch=1/{A,B,C} - * 3. mds1挂掉,mds2提供服务, 并从数据库加载copyset,mds2中copyset-1的配置 - * epoch=1/{A,B,C} - * 4. candidate-D上报心跳,copyset-1的配置为epoch=1/{A,B,C}。mds2发现D上报的 - * copyset中epoch和mds2记录的相同,但D并不在mds2记录的复制组中且调度模块也没有 - * 对应的operator,下发命令把D上的copyset-1删除导致D被误删 + * Bug description: In a stability-test environment, one machine went down and was marked pending; while the replicas were being recovered, an MDS failover occurred. + * In the end, 5 chunkservers in pending state never finished the migration. + * + * Analysis: + * 1. While MDS1 is serving, it generates an operator and issues the change copyset-1 {A,B,C} + D, where C is offline. + * 2. Copyset-1 completes the configuration change. The configuration on the leader is now epoch=2/{A,B,C,D}, + * the configuration on the candidate is epoch=1/{A,B,C}, and the configuration recorded in MDS1 is epoch=1/{A,B,C}. + * 3. MDS1 crashes and MDS2 takes over. MDS2 loads the copysets from the database, so the configuration of copyset-1 in MDS2 is epoch=1/{A,B,C}. + * 4. Candidate D reports a heartbeat with copyset-1 at epoch=1/{A,B,C}. MDS2 sees that the epoch reported by D equals the one it has recorded, + * but D is not in the replication group recorded by MDS2 and the scheduling module has no corresponding operator, + * so MDS2 issues a command to delete copyset-1 on D, and the replica on D is deleted by mistake. * - * 解决方法: - * 正常情况下,heartbeat模块会在mds启动一定时间(目前配置20min)后才可以下发删除copyset - * 的命令,极大概率保证这段时间内copyset-leader上的配置更新到mds, - * 防止刚加入复制组 副本上的数据被误删 + * Solution: + * Normally the heartbeat module may only issue copyset-deletion commands after the MDS has been up for a certain period (currently configured as 20 minutes). + * This makes it very likely that the configuration on the copyset leader has been reported to the MDS within that window, + * preventing data on a replica that has just joined the replication group from being deleted by mistake. * - * 这个时间的起始点应该是mds正式对外提供服务的时间,而不是mds的启动 - * 时间。如果设置为mds的启动 - * 时间,备mds启动很久后如果能够提供服务,就立马可以删除,导致bug + * The starting point of this period should be the moment the MDS actually starts serving requests, not the moment the MDS process started. + * If it were measured from the startup time, a standby MDS that has been up for a long time before taking over could issue the deletion immediately, which causes this bug. */
TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { // 1. copyset-1(epoch=2, peers={1,2,3}, leader=1) - // scheduler中有+4的operator + // The scheduler holds an operator that adds peer-4 CopySetKey key{ 1, 1 }; int startEpoch = 2; ChunkServerIdType leader = 1; @@ -131,8 +131,8 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { op.timeLimit = std::chrono::seconds(3); hbtest_->AddOperatorToOpController(op); - // 2. leader上报copyset-1(epoch=2, peers={1,2,3}, leader=1) - // mds下发配置变更 + // 2. leader reports copyset-1(epoch=2, peers={1,2,3}, leader=1) + // mds issues a configuration change ChunkServerHeartbeatRequest req; hbtest_->BuildBasicChunkServerRequest(leader, &req); CopySetInfo csInfo(key.first, key.second); @@ -140,7 +140,7 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { hbtest_->AddCopySetToRequest(&req, csInfo); ChunkServerHeartbeatResponse rep; hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 下发+D的配置变更 + // Check the response: a +D configuration change is issued ASSERT_EQ(1, rep.needupdatecopysets_size()); CopySetConf conf = rep.needupdatecopysets(0); ASSERT_EQ(key.first, conf.logicalpoolid()); @@ -150,25 +150,25 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(ConfigChangeType::ADD_PEER, conf.type()); ASSERT_EQ("10.198.100.3:9001:0", conf.configchangeitem().address()); - // 3. 清除mds中的operrator(模拟mds重启) + // 3. Clear the operator in mds (simulating an mds restart) hbtest_->RemoveOperatorFromOpController(key); - // 4. canndidate上报落后的与mds的配置(candidate回放日志时会一一apply旧配置): + // 4. 
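The guard described in the solution above amounts to a time check in the heartbeat module before any copyset-deletion command is issued. A minimal sketch of that check is shown below; the names startServingTime and cleanFollowerAfterMs are assumptions made for illustration, not the actual Curve implementation.

#include <chrono>

// Minimal sketch, assuming hypothetical names: startServingTime is recorded when the
// MDS begins serving requests (not when the process starts), and cleanFollowerAfterMs
// mirrors the mds.heartbeat.clean_follower_afterMs setting.
bool CanIssueCopysetDeletion(std::chrono::steady_clock::time_point startServingTime,
                             int64_t cleanFollowerAfterMs) {
    auto servingMs = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::steady_clock::now() - startServingTime).count();
    // Refuse to clean followers until the MDS has been serving long enough for
    // copyset leaders to report their latest configuration.
    return servingMs >= cleanFollowerAfterMs;
}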
The candidate reports the outdated configuration compared to MDS (the candidate replays logs one by one to apply the old configuration): // copyset-1(epoch=1, peers={1,2,3}, leader=1) - // 由于mds.heartbeat.clean_follower_afterMs时间还没有到,mds还不能下发 - // 删除命令。mds下发为空,candidate上的数据不会被误删 + // Because mds.heartbeat.clean_follower_afterMs time has not yet elapsed, MDS cannot issue + // deletion commands. MDS issues no commands, so the data on the candidate will not be accidentally deleted. rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 为空 + // Check the response, it is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 5. 睡眠mds.heartbeat.clean_follower_afterMs + 10ms后 - // canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1) - // mds下发删除配置,candidate上的数据会被误删 + // 5. Sleep mds.heartbeat.clean_follower_afterMs + 10ms, + // candidate reports staled copyset-1(epoch=1, peers={1,2,3}, leader=1) + // mds issues a deletion configuration, and the data on the candidate will be mistakenly deleted usleep((sleepTimeMs_ + 10) * 1000); rep.Clear(); req.Clear(); @@ -183,7 +183,7 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { ASSERT_EQ(peers.size(), conf.peers_size()); ASSERT_EQ(startEpoch, conf.epoch()); - // 6. leader上报最新配置copyset-1(epoch=3, peers={1,2,3,4}, leader=1) + // 6. leader reports the latest configuration copyset-1(epoch=3, peers={1,2,3,4}, leader=1) auto newPeers = peers; newPeers.emplace(candidate); auto newEpoch = startEpoch + 1; @@ -193,24 +193,24 @@ TEST_F(HeartbeatExceptionTest, test_mdsRestart_opLost) { BuildCopySetInfo(&csInfo, startEpoch + 1, leader, newPeers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 为空 + // Check the response, it is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); - // 检查mdstopology的数据 + // Check the data of mdstopology ::curve::mds::topology::CopySetInfo copysetInfo; ASSERT_TRUE(hbtest_->topology_->GetCopySet(key, ©setInfo)); ASSERT_EQ(newEpoch, copysetInfo.GetEpoch()); ASSERT_EQ(leader, copysetInfo.GetLeader()); ASSERT_EQ(newPeers, copysetInfo.GetCopySetMembers()); - // 7. canndidate上报staled copyset-1(epoch=1, peers={1,2,3}, leader=1) - // mds不下发配置 + // 7. 
candidate reports stale copyset-1(epoch=1, peers={1,2,3}, leader=1) + // mds does not issue any configuration rep.Clear(); req.Clear(); hbtest_->BuildBasicChunkServerRequest(candidate, &req); BuildCopySetInfo(&csInfo, startEpoch - 1, leader, peers); hbtest_->AddCopySetToRequest(&req, csInfo); hbtest_->SendHeartbeat(req, SENDHBOK, &rep); - // 检查response, 下发copyset当前配置指导candidate删除数据 + // Check the response: it is empty ASSERT_EQ(0, rep.needupdatecopysets_size()); }
diff --git a/test/integration/raft/raft_config_change_test.cpp b/test/integration/raft/raft_config_change_test.cpp index 5660617558..3dd54182bf 100644 --- a/test/integration/raft/raft_config_change_test.cpp +++ b/test/integration/raft/raft_config_change_test.cpp @@ -270,7 +270,7 @@ class RaftConfigChangeTest : public testing::Test { std::map paramsIndexs; std::vector params; int maxWaitInstallSnapshotMs; - // 等待多个副本数据一致的时间 + // Time to wait for the replicas' data to become consistent int waitMultiReplicasBecomeConsistent; }; @@ -279,11 +279,11 @@ class RaftConfigChangeTest : public testing::Test { butil::AtExitManager atExitManager; /** - * 1. 3个节点正常启动 - * 2. 移除一个follower - * 3. 重复移除上一个follower - * 4. 再添加回来 - * 5. 重复添加回来 + * 1. 3 nodes start normally + * 2. Remove a follower + * 3. Remove the same follower again + * 4. Add it back + * 5. Add it back again */ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { LogicPoolID logicPoolId = 2; @@ -293,7 +293,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -325,7 +325,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ch++, loop); - // 2. 移除1个follower + // 2. Remove 1 follower LOG(INFO) << "remove 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -348,7 +348,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ch++, loop); - // 3. 重复移除,验证重复移除的逻辑是否正常 + // 3. Remove it again to verify that repeated removal is handled correctly butil::Status st2 = RemovePeer(logicPoolId, copysetId, conf, removePeer, options); ASSERT_TRUE(st2.ok()); @@ -361,7 +361,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ch++, loop); - // 4. add回来 + // 4. Add it back conf.remove_peer(removePeer.address()); butil::Status st3 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); @@ -375,7 +375,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ch++, loop); - // 5. 重复add回来,验证重复添加的逻辑是否正常 + // 5. Add it again to verify that repeated addition is handled correctly conf.add_peer(removePeer.address()); butil::Status st4 = AddPeer(logicPoolId, copysetId, conf, removePeer, options); @@ -389,7 +389,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeBasicAddAndRemovePeer) { ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); } @@ -402,7 +402,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -434,7 +434,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { ch++, loop); - // 2. 挂掉1个follower + // 2. 
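For orientation, the configuration-change round trip that ThreeNodeBasicAddAndRemovePeer exercises reduces to the pattern below. This is a simplified sketch that reuses the helper signatures visible in this diff (RemovePeer, AddPeer, braft::cli::CliOptions); it is not a verbatim excerpt of the test.

// Simplified sketch of the add/remove round trip, using the test fixture's helpers.
braft::cli::CliOptions options;
options.max_retry = 3;
options.timeout_ms = confChangeTimeoutMs;  // fixture constant

// Remove one follower; issuing RemovePeer a second time is expected to succeed as well.
butil::Status st = RemovePeer(logicPoolId, copysetId, conf, removePeer, options);
ASSERT_TRUE(st.ok());

// Keep the local Configuration in sync, then add the peer back; a repeated AddPeer
// must likewise be handled gracefully.
conf.remove_peer(removePeer.address());
st = AddPeer(logicPoolId, copysetId, conf, removePeer, options);
ASSERT_TRUE(st.ok());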
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -449,7 +449,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; @@ -466,18 +466,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 5. add回来 + // 5. Add it back conf.remove_peer(shutdownPeer.address()); butil::Status st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()) << st2.error_str(); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -494,7 +494,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveShutdownPeer) { ch ++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } @@ -507,7 +507,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -539,7 +539,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -554,7 +554,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { ch++, loop); - // 3. 移除此follower + // 3. Remove this follower braft::cli::CliOptions options; options.max_retry = 3; options.timeout_ms = confChangeTimeoutMs; @@ -571,16 +571,16 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { ch++, loop); - // 4. 恢复follower + // 4. Restore follower LOG(INFO) << "recover hang follower"; ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer)); - // 5. add回来 + // 5. Add it back conf.remove_peer(shutdownPeer.address()); butil::Status st2 = AddPeer(logicPoolId, copysetId, conf, shutdownPeer, options); ASSERT_TRUE(st2.ok()); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -597,15 +597,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveHangPeer) { ch ++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. 3个节点正常启动 - * 2. 移除leader - * 3. 再将old leader添加回来 + * 1. 3 nodes start normally + * 2. Remove leader + * 3. Add the old leader back again */ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -615,7 +615,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -646,7 +646,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ch++, loop); - // 2. 移除leader + // 2. 
Remove leader LOG(INFO) << "remove leader"; braft::cli::CliOptions options; options.max_retry = 3; @@ -677,7 +677,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ch++, loop); - // 3. add回来 + // 3. Add it back conf.remove_peer(oldLeader.address()); butil::Status st3 = AddPeer(logicPoolId, copysetId, conf, oldLeader, options); @@ -695,16 +695,16 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveLeader) { ch++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 3); } /** - * 1. 3个节点正常启动 - * 2. 挂一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 3 nodes start normally + * 2. Hang a follower + * 3. Remove the leader again + * 4. pull it up follower */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -714,7 +714,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -746,7 +746,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -761,7 +761,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { ch++, loop); - // 3. 移除leader + // 3. Remove leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; @@ -771,10 +771,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * Removal is generally successful, but if one follower has already gone down, + * the leader will automatically perform a term check and discover that + * the majority of followers are no longer connected. At this point, the leader will proactively step down, causing all requests to return as failures prematurely. + * Therefore, the assertions below may fail, but the removal itself will be successful. */ // ASSERT_TRUE(st1.ok()); ReadVerifyNotAvailable(leaderPeer, @@ -786,14 +786,14 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -810,7 +810,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { ch ++, loop); - // leader已经移除,所以只用验证2个副本数据一致性 + // The leader has been removed, so only the consistency of the data for two replicas is verified ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) { @@ -822,10 +822,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenRemoveLeader) { } /** - * 1. 3个节点正常启动 - * 2. hang一个follower - * 3. 再将leader移除掉 - * 4. follower拉起来 + * 1. 
3 nodes start normally + * 2. Hang a follower + * 3. Remove the leader again + * 4. pull up follower */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { LogicPoolID logicPoolId = 2; @@ -835,7 +835,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -867,7 +867,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -882,7 +882,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { ch++, loop); - // 3. 移除leader + // 3. Remove leader LOG(INFO) << "remove leader: " << leaderPeer.address(); braft::cli::CliOptions options; options.max_retry = 3; @@ -892,10 +892,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { st1 = RemovePeer(logicPoolId, copysetId, conf, leaderPeer, options); Peer oldLeader = leaderPeer; /** - * 一般能够移除成功,但是因为一个follower已经down了,那么 - * leader会自动进行check term,会发现已经有大多数的follower - * 已经失联,此时leader会主动step down,所以的request会提前 - * 返回失败,所以下面的断言会失败,但是移除本身会成功 + * Removal is generally successful, but if one follower has already gone down, + * the leader will automatically perform a term check and discover that + * the majority of followers are no longer connected. At this point, the leader will proactively step down, causing all requests to return as failures prematurely. + * Therefore, the assertions below may fail, but the removal itself will be successful. */ // ASSERT_TRUE(st1.ok()); ReadVerifyNotAvailable(leaderPeer, @@ -907,13 +907,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { 1); ::usleep(1000 * electionTimeoutMs * 2); - // 4. 拉起follower + // 4. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -930,7 +930,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { ch ++, loop); - // leader已经移除,所以验证2个副本数据一致性 + // The leader has been removed, so verify the data consistency of the two replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); std::vector newPeers; for (Peer peer : peers) { @@ -942,9 +942,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenRemoveLeader) { } /** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. 挂掉B,transfer leader给B - * 3. 拉起B,transfer leader给B + * 1. {A, B, C} three nodes start normally, assuming A is the leader + * 2. Hang up B, transfer leader to B + * 3. Pull up B, transfer leader to B */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; @@ -954,7 +954,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -986,7 +986,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1021,7 +1021,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(shutdownPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 拉起follower,然后再把leader transfer过去 + // 4. Pull up the follower and then transfer the leader over LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -1050,7 +1050,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ASSERT_STREQ(shutdownPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1067,15 +1067,15 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerThenTransferLeaderTo) { ch ++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 1. {A、B、C} 3个节点正常启动,假设A是leader - * 2. hang B,transfer leader给B - * 3. 恢复 B,transfer leader给B + * 1. {A, B, C} three nodes start normally, assuming A is the leader + * 2. Hang B, transfer leader to B + * 3. Restore B, transfer leader to B */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { LogicPoolID logicPoolId = 2; @@ -1085,7 +1085,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1117,7 +1117,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ch++, loop); - // 2. hang1个follower + // 2. Hang 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1145,7 +1145,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STRNE(hangPeer.address().c_str(), leaderId.to_string().c_str()); - // 4. 恢复follower,然后再把leader transfer过去 + // 4. Restore the follower and then transfer the leader LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.SignalPeer(hangPeer)); @@ -1173,7 +1173,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ASSERT_STREQ(hangPeer.address().c_str(), leaderPeer.address().c_str()); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1190,17 +1190,17 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerThenTransferLeaderTo) { ch ++, loop); - // 验证3个副本数据一致性 + // Verify data consistency across 3 replicas ::usleep(waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** * - * 1. {A、B、C} 3个节点正常启 - * 2. 挂掉一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. 
Remove the failed follower */ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; @@ -1210,7 +1210,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1242,7 +1242,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1257,10 +1257,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta ch++, loop); - // wait snapshot, 通过打两次快照确保后面的恢复必须走安装快照 + // Wait snapshot, ensuring that subsequent restores must follow the installation snapshot by taking two snapshots LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1270,7 +1270,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1279,7 +1279,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta ch ++, loop); - // 3. 拉起peer4 + // 3. Pull up peer4 ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1291,7 +1291,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()) << st.error_str(); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1324,10 +1324,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeShutdownPeerAndThenAddNewFollowerFromInsta /** * - * 1. {A、B、C} 3个节点正常启 - * 2. hang一个follower - * 3. 起一个节点D,Add D(需要额外确保通过snapshot恢复) - * 4. remove挂掉的follower + * 1. {A, B, C} three nodes start normally + * 2. Hang a follower + * 3. Start a node D, Add D (additional ensure recovery through snapshot) + * 4. Remove the failed follower */ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; @@ -1337,7 +1337,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1369,7 +1369,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1384,10 +1384,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1397,7 +1397,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1406,7 +1406,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn ch ++, loop); - // 3. 拉起peer4 + // 3. Pull up peer4 ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1419,7 +1419,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn butil::Status st = AddPeer(logicPoolId, copysetId, conf, peer4, options); ASSERT_TRUE(st.ok()); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1451,9 +1451,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangPeerAndThenAddNewFollowerFromInstallSn } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 挂了follower,并删除其所有raft log和数据 - * 3. 重启follower,follower能够通过数据恢复最终追上leader + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs and data + * 3. Restart the follower, and the follower can eventually catch up with the leader through data recovery */ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; @@ -1463,7 +1463,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1495,7 +1495,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1510,7 +1510,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho ch++, loop); - // 删除此peer的数据,然后重启 + // Delete the data for this peer and restart it ASSERT_EQ(0, ::system(PeerCluster::RemoveCopysetDirCmd(shutdownPeer).c_str())); std::shared_ptr @@ -1518,10 +1518,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho ASSERT_FALSE(fs->DirExists(PeerCluster::CopysetDirWithoutProtocol( shutdownPeer))); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1531,7 +1531,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1540,13 +1540,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1568,9 +1568,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveDataAndThenRecoverFromInstallSnapsho } /** - * 1. {A、B、C} 3个节点正常启 - * 2. 挂了follower,并删除其所有raft log - * 3. 重启follower + * 1. {A, B, C} three nodes start normally + * 2. Hang up the follower and delete all its raft logs + * 3. Restart follower */ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnapshot) { // NOLINT LogicPoolID logicPoolId = 2; @@ -1580,7 +1580,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1612,7 +1612,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1627,7 +1627,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap ch++, loop); - // 删除此peer的log,然后重启 + // Delete the log of this peer and restart it ::system(PeerCluster::RemoveCopysetLogDirCmd(shutdownPeer, logicPoolId, copysetId).c_str()); @@ -1637,10 +1637,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap logicPoolId, copysetId))); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1650,7 +1650,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1659,13 +1659,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1687,11 +1687,11 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRemoveRaftLogAndThenRecoverFromInstallSnap } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中挂掉leader - * 本次install snapshot失败,但是new leader会被选出来,new leader继续给 - * follower恢复数据,最终follower数据追上leader并一致 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to recover through snapshot), and hang the leader during the recovery process + * The install snapshot failed this time, but the new leader will be selected and will continue to provide + * The follower recovers data, and ultimately the follower data catches up with the leader and is consistent */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderShutdown) { // NOLINT LogicPoolID logicPoolId = 2; @@ -1701,7 +1701,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1733,7 +1733,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1748,10 +1748,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1761,7 +1761,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1770,13 +1770,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. After a period of random sleep, hang up the leader and simulate the installation snapshot when the leader hangs up int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); @@ -1794,7 +1794,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1822,9 +1822,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader重启 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to recover through snapshot), and restart the leader during the recovery process */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderRestart) { // NOLINT LogicPoolID logicPoolId = 2; @@ -1834,7 +1834,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -1866,7 +1866,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1881,10 +1881,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1894,7 +1894,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1903,13 +1903,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. After a period of random sleep, hang up the leader and simulate the installation snapshot when the leader hangs up int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); @@ -1929,7 +1929,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1957,9 +1957,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中hang leader + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to recover through snapshot), and hang the leader during the recovery process */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHang) { // NOLINT LogicPoolID logicPoolId = 2; @@ -1969,7 +1969,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2001,7 +2001,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2016,10 +2016,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2029,7 +2029,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2038,13 +2038,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader hang + // 4. After a period of random sleep, hang up the leader and simulate the leader hang during installation snapshot int sleepMs = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs); @@ -2062,7 +2062,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2090,9 +2090,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中leader hang一会 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower + * 3. Restart to recover the follower (additional assurance is required to recover through snapshot), and during the recovery process, the leader will hang for a while */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeaderHangMoment) { // NOLINT LogicPoolID logicPoolId = 2; @@ -2102,7 +2102,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2134,7 +2134,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2149,10 +2149,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2162,7 +2162,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2171,13 +2171,13 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // 4. 随机睡眠一段时间后,挂掉leader,模拟install snapshot的时候leader挂掉 + // 4. After a period of random sleep, hang up the leader and simulate the installation snapshot when the leader hangs up int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); @@ -2191,7 +2191,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); LOG(INFO) << "new leader is: " << leaderPeer.address(); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2219,10 +2219,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButLeade } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower挂了 - * 4. 一段时间后拉起来 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to recover through snapshot), but the follower hung during the recovery process + * 4. After a period of time, pull it up */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerShutdown) { // NOLINT LogicPoolID logicPoolId = 2; @@ -2232,7 +2232,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2264,7 +2264,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2279,10 +2279,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2292,7 +2292,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2301,18 +2301,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a random period of sleep, hang up the follower and simulate the installation snapshot + // Problem with follower int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 5. 把follower拉来 + // 5. Bring the follower here int sleepMs2 = butil::fast_rand_less_than(1000) + 1; ::usleep(1000 * sleepMs2); ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, @@ -2321,7 +2321,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2343,9 +2343,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower重启了 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to recover through snapshot). During the recovery process, the follower restarted */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerRestart) { // NOLINT LogicPoolID logicPoolId = 2; @@ -2355,7 +2355,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2387,7 +2387,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2402,10 +2402,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2415,7 +2415,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2424,25 +2424,25 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,挂掉follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After sleeping for a random period, shut down the follower to simulate + // a follower failure during install snapshot int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 5. 把follower拉来 + // 5. Pull the follower back up ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2464,10 +2464,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo } /** - * 1. {A、B、C} 3个节点正常启动 - * 2. 挂了follower, - * 3. 重启恢复follower(需要额外确保通过snapshot恢复),恢复过程中follower hang了 - * 4. 一段时间后恢复 + * 1. {A, B, C} 3 nodes start normally + * 2. Hang up the follower, + * 3. Restart to recover the follower (additional assurance is required to recover through snapshot), and the follower hangs during the recovery process + * 4. Recover it after a period of time */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollowerHang) { // NOLINT LogicPoolID logicPoolId = 2; @@ -2477,7 +2477,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -2509,7 +2509,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2524,10 +2524,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2537,7 +2537,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2546,18 +2546,18 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); - // 4. 随机睡眠一段时间后,hang follower,模拟install snapshot的时候 - // follower出现问题 + // 4. After a period of random sleep, hang the follower and simulate the installation snapshot + // Problem with follower int sleepMs1 = butil::fast_rand_less_than(maxWaitInstallSnapshotMs) + 1; ::usleep(1000 * sleepMs1); ASSERT_EQ(0, cluster.HangPeer(shutdownPeer)); - // 5. 把follower恢复 + // 5. Restore the follower int sleepMs2 = butil::fast_rand_less_than(1000) + 1; ::usleep(1000 * sleepMs2); ASSERT_EQ(0, cluster.SignalPeer(shutdownPeer)); @@ -2565,7 +2565,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2587,10 +2587,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshotButFollo } /** - * 验证3个节点的复制组,并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -2600,7 +2600,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2632,7 +2632,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ch++, loop); - // 2. 挂掉1个follower + // 2. 
Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2647,10 +2647,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2660,7 +2660,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2669,7 +2669,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ch ++, loop); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart shutdown follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -2703,7 +2703,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { ASSERT_EQ(0, ::strcmp(leaderPeer.address().c_str(), shutdownPeer.address().c_str())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2725,9 +2725,9 @@ TEST_F(RaftConfigChangeTest, ThreeNodeRecoverFollowerFromInstallSnapshot) { } /** - * 1. 创建5个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉两个follower - * 3. 让两个follower从installsnapshot恢复 + * 1. Create a replication group of 5 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up two followers + * 3. Restore two followers from installsnapshot */ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -2737,7 +2737,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { char ch = 'a'; int loop = 25; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 member LOG(INFO) << "start 5 chunkservers"; PeerId leaderId; Peer leaderPeer; @@ -2773,7 +2773,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { ch++, loop); - // 2. 挂掉2个follower + // 2. Hang up 2 followers LOG(INFO) << "shutdown 2 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2790,10 +2790,10 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { ch++, loop); - // wait snapshot, 保证能够触发安装快照 + // Wait snapshot to ensure that the installation snapshot can be triggered LOG(INFO) << "wait snapshot 1"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2803,7 +2803,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { loop); LOG(INFO) << "wait snapshot 2"; ::sleep(2 * snapshotIntervalS); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2812,7 +2812,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { ch ++, loop); - // 3. 拉起follower + // 3. 
Pull up the follower LOG(INFO) << "restart shutdown 2 follower"; ASSERT_EQ(0, cluster.StartPeer(shutdownPeer1, PeerCluster::PeerToId(shutdownPeer1))); @@ -2820,7 +2820,7 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { PeerCluster::PeerToId(shutdownPeer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2842,11 +2842,11 @@ TEST_F(RaftConfigChangeTest, FiveNodeRecoverTwoFollowerFromInstallSnapshot) { } /** - * 验证3个节点的复制组{A、B、C},并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2856,7 +2856,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; @@ -2886,7 +2886,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "shutdown 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -2901,7 +2901,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -2921,7 +2921,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2930,10 +2930,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -2954,11 +2954,11 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang follower - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang follower + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -2968,7 +2968,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; @@ -2998,7 +2998,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 follower"; std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -3013,7 +3013,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3033,7 +3033,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -3042,10 +3042,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -3066,11 +3066,11 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangFollowerThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并挂掉leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3080,7 +3080,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; @@ -3110,11 +3110,11 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader LOG(INFO) << "shutdown 1 leader"; Peer shutdownPeer = leaderPeer; ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); WriteThenReadVerify(leaderPeer, logicPoolId, @@ -3124,7 +3124,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. 
Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3144,7 +3144,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -3153,10 +3153,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -3177,11 +3177,11 @@ TEST_F(RaftConfigChangeTest, ThreeNodeKillLeaderThenChangePeers) { } /** - * 验证3个节点的复制组{A、B、C},并Hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 变更配置为{A、B、D} - * 4. transfer leader 到 D,并读取数据验证 + * Verify the replication groups {A, B, C} of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang leader + * 3. Change the configuration to {A, B, D} + * 4. Transfer leader to D and read data validation */ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { LogicPoolID logicPoolId = 2; @@ -3191,7 +3191,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members LOG(INFO) << "start 3 chunkservers"; Peer leaderPeer; std::vector peers; @@ -3221,11 +3221,11 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { ch++, // a loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower LOG(INFO) << "hang 1 leader"; Peer hangPeer = leaderPeer; ASSERT_EQ(0, cluster.HangPeer(hangPeer)); - // 等待新的leader产生 + // Waiting for a new leader to be generated ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); WriteThenReadVerify(leaderPeer, logicPoolId, @@ -3235,7 +3235,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { ch++, // b loop); - // 3.拉起peer4并变更配置 + // 3. 
Pull up Peer4 and change the configuration ASSERT_EQ(0, cluster.StartPeer(peer4, PeerCluster::PeerToId(peer4))); ::sleep(2); @@ -3255,7 +3255,7 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -3264,10 +3264,10 @@ TEST_F(RaftConfigChangeTest, ThreeNodeHangLeaderThenChangePeers) { ch - 1, // b loop); - // transfer leader 到新加入的节点 + // Transfer leader to newly added node TransferLeaderAssertSuccess(&cluster, peer4, options); leaderPeer = peer4; - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, diff --git a/test/integration/raft/raft_log_replication_test.cpp b/test/integration/raft/raft_log_replication_test.cpp index f6a39c3436..54a8e08d21 100644 --- a/test/integration/raft/raft_log_replication_test.cpp +++ b/test/integration/raft/raft_log_replication_test.cpp @@ -266,18 +266,18 @@ class RaftLogReplicationTest : public testing::Test { int snapshotIntervalS; std::map paramsIndexs; std::vector params; - // 等待多个副本数据一致的时间 + // Waiting for multiple replica data to be consistent int waitMultiReplicasBecomeConsistent; }; butil::AtExitManager atExitManager; /** - * 验证3个节点的复制组,测试隐式提交 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 等带step down - * 3. 拉起1个follower + * Validate replication groups for 3 nodes and test implicit commit + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Wait for step down + * 3. Pull up 1 follower */ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { LogicPoolID logicPoolId = 2; @@ -287,7 +287,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -318,7 +318,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { ch++, loop); - // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); @@ -332,7 +332,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { ch ++, 1); - // 3. 等待step down,等待2个选举超时,保证一定step down + // 3. Wait for step down, wait for 2 elections to timeout, ensure a certain step down ::usleep(1000 * electionTimeoutMs * 2); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -342,15 +342,15 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { ch - 1, 1); - // 4. 拉起1个follower + // 4. Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); Peer newLeader; ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // new leader就是old leader + // new leader is an old leader ASSERT_STREQ(leaderPeer.address().c_str(), newLeader.address().c_str()); - // read step down之前append进去的log entry,测试隐式提交 + // Read the log entries appended before the "read step down" to test implicit commits. ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -378,11 +378,11 @@ TEST_F(RaftLogReplicationTest, ThreeNodeImplicitCommit) { } /** - * 验证3个节点的复制组,测试日志截断 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 挂掉leader - * 3. 
拉起2个follower + * Verify the replication groups of three nodes and test log truncation + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Hang up the leader + * 3. Pull up 2 followers */ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { LogicPoolID logicPoolId = 2; @@ -392,7 +392,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -423,7 +423,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { ch++, loop); - // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); @@ -437,11 +437,11 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { ch++, 2); - // 3. 挂掉leader + // 3. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer oldLeader = leaderPeer; - // 4. 拉起2个follower + // 4. Pull up 2 followers ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.StartPeer(followerPeers[1], @@ -449,7 +449,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // 日志截断 + // Log truncation ReadNotVerify(leaderPeer, logicPoolId, copysetId, @@ -477,12 +477,12 @@ TEST_F(RaftLogReplicationTest, ThreeNodeTruncateLog) { } /** - * 验证3个节点的复制组,测试向落后多个term的follower复制日志 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉一个follower - * 3. 挂掉leader,等待2个ET重启 - * 4. 挂掉leader,等待2个ET重启 - * 3. 拉起挂掉的follower + * Verify the replication group of three nodes, and test copying logs to followers who fall behind multiple terms + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up a follower + * 3. Hang up the leader and wait for 2 ETs to restart + * 4. Hang up the leader and wait for 2 ETs to restart + * 3. Pull up the hanging follower */ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { LogicPoolID logicPoolId = 2; @@ -492,7 +492,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { char ch = 'a'; int loop = 10; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -523,7 +523,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { ch++, loop); - // 2. 挂掉1个Follower + // 2. Hang up 1 Follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); @@ -536,7 +536,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { ch++, loop); - // 3. 挂掉leader,等待2个ET重启 + // 3. Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); ASSERT_EQ(0, cluster.StartPeer(leaderPeer, @@ -551,7 +551,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { ch++, loop); - // 4. 挂掉leader,等待2个ET重启 + // 4. 
Hang up the leader and wait for 2 ETs to restart ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ::usleep(1000 * electionTimeoutMs * 2); ASSERT_EQ(0, cluster.StartPeer(leaderPeer, @@ -566,7 +566,7 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { ch++, loop); - // 5. 拉起挂掉的follower + // 5. Pull up the hanging follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -578,26 +578,26 @@ TEST_F(RaftLogReplicationTest, ThreeNodeLogReplicationToOldFollwer) { ch++, loop); - // 多等一会,保证安装快照成功 + //Wait a little longer to ensure successful installation of the snapshot ::usleep(1.3 * waitMultiReplicasBecomeConsistent * 1000); CopysetStatusVerify(peers, logicPoolId, copysetId, 2); } /** - * 验证4个成员的复制组日志复制 - * 1. 4个成员正常启动 - * 2. 挂掉leader - * 3. leader拉起来 - * 4. 挂1一个follower - * 5. follower拉起来 - * 6. 挂2个follower - * 7. 拉起1个follower - * 8. 挂掉leader - * 9. 拉起上一步挂的leader - * 10. 挂掉leader和两个follower - * 11. 逐个拉起来 - * 12. 挂掉3个follower - * 13. 逐个拉起来 + * Verify replication group log replication for 4 members + * 1. 4 members started normally + * 2. Hang up the leader + * 3. Pull up the leader + * 4. Hang 1 follower + * 5. Follower, pull it up + * 6. Hang 2 followers + * 7. Pull up 1 follower + * 8. Hang up the leader + * 9. Pull up the leader from the previous step + * 10. Hang up the leader and two followers + * 11. Pull up one by one + * 12. Hang up three followers + * 13. Pull up one by one */ TEST_F(RaftLogReplicationTest, FourNodeKill) { LogicPoolID logicPoolId = 2; @@ -607,7 +607,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动4个成员的复制组 + // 1. Start a replication group of 4 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -640,7 +640,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -662,7 +662,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { loop); - // 3. old leader拉起来 + // 3. Pull up the old leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); @@ -674,7 +674,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // c loop); - // 4. 挂1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 3); @@ -687,7 +687,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // d loop); - // 5. follower拉起来 + // 5. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&newLeader)); @@ -699,7 +699,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); @@ -713,7 +713,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // f 1); - // 7. 拉起1个follower + // 7. Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -726,7 +726,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // g loop); - // 8. 挂掉leader + // 8. 
Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ReadVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, chunkId, length, ch - 1, 1); - // 9. 拉起上一步挂的leader + // 9. Pull up the leader from the previous step ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -749,7 +749,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -767,7 +767,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个拉起来 + // 11. Pull up one by one ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); @@ -800,7 +800,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { ch++, // i loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); @@ -816,7 +816,7 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { 1); ::usleep(1000 * electionTimeoutMs * 2); - // 13. 逐个拉起来 + // 13. Pull up one by one ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0], PeerCluster::PeerToId(followerPeers3[0]))); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); @@ -855,20 +855,20 @@ TEST_F(RaftLogReplicationTest, FourNodeKill) { } /** - * 验证4个成员的复制组日志复制 - * 1. 4个成员正常启动 - * 2. hang leader - * 3. 恢复leader - * 4. hang1一个follower - * 5. 恢复follower - * 6. hang2个follower - * 7. 恢复1个follower - * 8. hangleader - * 9. hang上一步hang的leader - * 10. hang leader和两个follower - * 11. 逐个恢复 - * 12. hang3个follower - * 13. 逐个恢复 + * Verify replication group log replication for 4 members + * 1. 4 members started normally + * 2. Hang leader + * 3. Restore leader + * 4. Hang 1 follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang the leader + * 9. Restore the leader hung in the previous step + * 10. Hang leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FourNodeHang) { LogicPoolID logicPoolId = 2; @@ -878,7 +878,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { char ch = 'a'; int loop = 10; - // 1. 启动4个成员的复制组 + // 1. Start a replication group of 4 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -934,7 +934,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { loop); - // 3. 恢复old leader + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldLeader)); WriteThenReadVerify(newLeader, logicPoolId, @@ -944,7 +944,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); @@ -957,7 +957,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); WriteThenReadVerify(newLeader, logicPoolId, @@ -967,7 +967,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ch++, // e loop); - // 6. hang 2个follower + // 6. 
Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 3); @@ -981,7 +981,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ch++, // f 1); -// 7. 恢复1个follower +// 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); @@ -1003,7 +1003,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ch - 1, 1); - // 9. 恢复上一步挂的leader + // 9. Restore the previous suspended leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); @@ -1015,7 +1015,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ch++, // h loop); - // 10. hang leader和两个follower + // 10. Hang leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1033,7 +1033,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { 1); ::usleep(1000 * electionTimeoutMs * 2); - // 11. 逐个恢复 + // 11. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); ReadVerifyNotAvailable(leaderPeer, @@ -1062,7 +1062,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { ch++, // j loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); @@ -1078,7 +1078,7 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { 1); - // 13. 逐个恢复 + // 13. Restore one by one ::usleep(1000 * electionTimeoutMs * 2); ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(-1, cluster.WaitLeader(&leaderPeer)); @@ -1111,20 +1111,20 @@ TEST_F(RaftLogReplicationTest, FourNodeHang) { } /** - * 验证5个成员的复制组日志复制 - * 1. 5个成员正常启动 - * 2. 挂 leader - * 3. 恢复leader - * 4. 挂1一个follower - * 5. 恢复follower - * 6. 挂2个follower - * 7. 恢复1个follower - * 8. 挂leader - * 9. 恢复一步挂的leader - * 10. 挂leader和两个follower - * 11. 逐个恢复 - * 12. 挂3个follower - * 13. 逐个恢复 + * Verify replication group log replication for 5 members + * 1. 5 members started normally + * 2. Hang the leader + * 3. Restore leader + * 4. Hang 1 follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang the leader + * 9. Restore one-step suspended leaders + * 10. Hang the leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeKill) { LogicPoolID logicPoolId = 2; @@ -1134,7 +1134,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1169,7 +1169,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // a loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -1190,7 +1190,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { loop); - // 3. old leader拉起来 + // 3. Pull up the old leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1202,7 +1202,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // c loop); - // 4. 挂1一个follower + // 4. 
Hang 1 follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); @@ -1215,7 +1215,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // d loop); - // 5. follower拉起来 + // 5. Pull the follower back up ASSERT_EQ(0, cluster.StartPeer(followerPeers1[0], PeerCluster::PeerToId(followerPeers1[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1227,7 +1227,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // e loop); - // 6. 挂2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); @@ -1241,7 +1241,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // f loop); - // 7. 拉起1个follower + // 7. Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers2[0], PeerCluster::PeerToId(followerPeers2[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1253,7 +1253,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // g loop); - // 8. 挂掉leader + // 8. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(newLeader)); ReadVerifyNotAvailable(newLeader, logicPoolId, copysetId, chunkId, length, ch - 1, 1); - // 9. 拉起上一步挂的leader + // 9. Pull up the leader from the previous step ASSERT_EQ(0, cluster.StartPeer(newLeader, PeerCluster::PeerToId(newLeader))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1276,7 +1276,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // h loop); - // 10. 挂掉leader和两个follower + // 10. Hang up the leader and two followers ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1293,7 +1293,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch - 1, 1); - // 11. 逐个拉起来 + // 11. Pull up one by one ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1325,7 +1325,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { ch++, // k loop); - // 12. 挂掉3个follower + // 12. Hang up three followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); @@ -1341,7 +1341,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { 1); - // 13. 逐个拉起来 + // 13. Pull up one by one ASSERT_EQ(0, cluster.StartPeer(followerPeers3[0], PeerCluster::PeerToId(followerPeers3[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1378,20 +1378,20 @@ TEST_F(RaftLogReplicationTest, FiveNodeKill) { /** - * 验证5个成员的复制组日志复制 - * 1. 5个成员正常启动 - * 2. hang leader - * 3. 恢复leader - * 4. hang 1一个follower - * 5. 恢复follower - * 6. hang 2个follower - * 7. 恢复1个follower - * 8. hang leader - * 9. hang上一步hang的leader - * 10. hang leader和两个follower - * 11. 逐个恢复 - * 12. hang3个follower - * 13. 逐个恢复 + * Verify replication group log replication for 5 members + * 1. 5 members started normally + * 2. Hang leader + * 3. Restore leader + * 4. Hang 1 follower + * 5. Restore follower + * 6. Hang 2 followers + * 7. Restore 1 follower + * 8. Hang leader + * 9. Restore the leader hung in the previous step + * 10. Hang leader and two followers + * 11. Restore one by one + * 12. Hang 3 followers + * 13. Restore one by one */ TEST_F(RaftLogReplicationTest, FiveNodeHang) { LogicPoolID logicPoolId = 2; @@ -1401,7 +1401,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { char ch = 'a'; int loop = 10; - // 1. 启动5个成员的复制组 + // 1. 
Start a replication group of 5 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1457,7 +1457,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { loop); - // 3. 恢复old leader + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); WriteThenReadVerify(newLeader, logicPoolId, @@ -1467,7 +1467,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch++, // c loop); - // 4. hang 1一个follower + // 4. Hang 1, one follower std::vector followerPeers1; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers1); ASSERT_GE(followerPeers1.size(), 1); @@ -1480,7 +1480,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch++, // d loop); - // 5. 恢复follower + // 5. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers1[0])); WriteThenReadVerify(newLeader, logicPoolId, @@ -1490,7 +1490,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch++, // e loop); - // 6. hang 2个follower + // 6. Hang 2 followers std::vector followerPeers2; PeerCluster::GetFollwerPeers(peers, newLeader, &followerPeers2); ASSERT_GE(followerPeers2.size(), 4); @@ -1504,7 +1504,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch++, // f loop); - // 7. 恢复1个follower + // 7. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers2[0])); WriteThenReadVerify(newLeader, logicPoolId, @@ -1524,7 +1524,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch - 1, 1); - // 9. 恢复上一步挂的leader + // 9. Restore the previous suspended leader ASSERT_EQ(0, cluster.SignalPeer(newLeader)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); @@ -1536,7 +1536,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch++, // h loop); - // 10. hang leader和两个follower + // 10. Hang leader and two followers ASSERT_EQ(0, cluster.HangPeer(leaderPeer)); Peer shutdownFollower; if (leaderPeer.address() != followerPeers2[0].address()) { @@ -1553,7 +1553,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch - 1, 1); - // 11. 逐个恢复 + // 11. Restore one by one ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); WriteThenReadVerify(leaderPeer, @@ -1580,7 +1580,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { ch++, // k loop); - // 12. hang 3个follower + // 12. Hang 3 followers std::vector followerPeers3; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers3); ASSERT_GE(followerPeers3.size(), 3); @@ -1596,7 +1596,7 @@ TEST_F(RaftLogReplicationTest, FiveNodeHang) { 1); - // 13. 逐个恢复 + // 13. 
Restore one by one ASSERT_EQ(0, cluster.SignalPeer(followerPeers3[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); WriteThenReadVerify(leaderPeer, diff --git a/test/integration/raft/raft_snapshot_test.cpp b/test/integration/raft/raft_snapshot_test.cpp index a8e57aaa3f..9c276b5de6 100644 --- a/test/integration/raft/raft_snapshot_test.cpp +++ b/test/integration/raft/raft_snapshot_test.cpp @@ -194,7 +194,7 @@ class RaftSnapshotTest : public testing::Test { params_.push_back(const_cast(raftVoteParam[2])); params_.push_back(const_cast(raftVoteParam[3])); - // 配置默认raft client option + // Configure default raft client option defaultCliOpt_.max_retry = 3; defaultCliOpt_.timeout_ms = 10000; } @@ -237,15 +237,15 @@ class RaftSnapshotTest : public testing::Test { /** - * 验证连续通过快照恢复copyset - * 1.创建3个副本的复制组 - * 2.挂掉一个follower - * 3.写入数据,并等待raft snapshot 产生 - * 4.启动挂掉的follower,使其通过snapshot恢复 - * 5.transfer leader到刚启动的follower,读数据验证 - * 6.remove old leader,主要为了删除其copyset目录 - * 7.添加新的peer,使其通过快照加载数据 - * 8.transfer leader到新加入的peer,读数据验证 + *Verify continuous recovery of copyset through snapshots + *1. Create a replication group of 3 replicas + *2. Hang up a follower + *3. Write data and wait for the raft snapshot to be generated + *4. Start the failed follower and restore it through snapshot + *5. Transfer the leader to the newly started follower and read the data for verification + *6. Remove old leader, mainly to delete its copyset directory + *7. Add a new peer to load data through a snapshot + *8. Transfer leader to newly added peer, read data validation */ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { LogicPoolID logicPoolId = 2; @@ -276,7 +276,7 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); Peer oldLeader = leaderPeer; - // 挂掉一个follower + //Hang up a follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -288,7 +288,7 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 + // Initiate read/write to generate a chunk file WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -299,10 +299,10 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { initsn); LOG(INFO) << "write 1 end"; - // wait snapshot,保证能够触发打快照 + // wait snapshot to ensure that it can trigger a snapshot ::sleep(1.5*snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); @@ -310,11 +310,11 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { ::sleep(3); TransferLeaderAssertSuccess(&cluster, shutdownPeer, defaultCliOpt_); leaderPeer = shutdownPeer; - // 读数据验证 + // Read Data Validation ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); Configuration conf = cluster.CopysetConf(); - // 删除旧leader及其目录 + // Delete old leader and its directory butil::Status status = RemovePeer(logicPoolId, copysetId, conf, oldLeader, defaultCliOpt_); ASSERT_TRUE(status.ok()); @@ -322,31 +322,31 @@ TEST_F(RaftSnapshotTest, AddPeerRecoverFromSnapshot) { rmdir += std::to_string(PeerCluster::PeerToId(oldLeader)); ::system(rmdir.c_str()); - // 添加新的peer + // Add a new peer ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); status = AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()) << status; - // transfer 
leader 到peer4_,并读出来验证 + // Transfer leader to peer4_, And read it out for verification TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_); leaderPeer = peer4_; - // 读数据验证 + // Read Data Validation ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,然后 read 出来验证一遍 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval,write read 数据, - * 5. 然后再 sleep 超过一个 snapshot interval,write read 数据;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot - * 6. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 7. transfer leader 到shut down 的peer 上 - * 8. 在 read 之前写入的数据验证 - * 9. 再 write 数据,再 read 出来验证一遍 + *Verify the shutdown of non leader nodes on three nodes, restart, and control the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and then read it out for verification + * 3. Shutdown non leader + * 4. Then sleep exceeds one snapshot interval, write read data, + * 5. Then sleep for more than one snapshot interval and write read data; 4,5 two-step + * It is to ensure that at least two snapshots are taken, so that when the node restarts again, it must pass the install snapshot + * 6. Wait for the leader to be generated, and then verify the data written before the read + * 7. Transfer leader to shut down peer + * 8. Verification of data written before read + * 9. Write the data again and read it out for verification */ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LogicPoolID logicPoolId = 2; @@ -377,7 +377,7 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 + // Initiate read/write to generate a chunk file WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -388,11 +388,11 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { initsn); LOG(INFO) << "write 1 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // The operations between replicas within the raft are not all synchronized, and there may be outdated replica operations + // So take a nap first to prevent concurrent statistics of file information ::sleep(2); - // shutdown 某个follower + // shutdown a certain follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -403,10 +403,10 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // wait snapshot, 保证能够触发打快照 - // 此外通过增加chunk版本号,触发chunk文件产生快照文件 + // wait snapshot to ensure that it can trigger a snapshot + // In addition, by increasing the chunk version number, trigger the chunk file to generate a snapshot file ::sleep(1.5*snapshotIntervalS_); - // 再次发起 read/write + // Initiate read/write again LOG(INFO) << "write 2 start"; WriteThenReadVerify(leaderPeer, logicPoolId, @@ -417,7 +417,7 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { loop, initsn + 1); LOG(INFO) << "write 2 end"; - // 验证chunk快照数据正确性 + // Verify the correctness of chunk snapshot data ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, @@ -426,16 +426,16 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { ch, loop); - // wait snapshot, 保证能够触发打快照 + // wait snapshot to ensure that it can trigger a snapshot 
::sleep(1.5*snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 3 start"; - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -458,17 +458,17 @@ TEST_F(RaftSnapshotTest, ShutdownOnePeerRestartFromInstallSnapshot) { } /** - * 验证3个节点的关闭非 leader 节点,重启,控制让其从 install snapshot 恢复 - * 1. 创建3个副本的复制组 - * 2. 等待 leader 产生,write 数据,并更新写版本,产生chunk快照 - * 3. shutdown 非 leader - * 4. 然后 sleep 超过一个 snapshot interval, - * 5. 删除chunk快照,再次用新版本write 数据,产生新的chunk快照 - * 6. 然后再 sleep 超过一个 snapshot interval;4,5两步 - * 是为了保证打至少两次快照,这样,节点再重启的时候必须通过 install snapshot - * 7. 等待 leader 产生,然后 read 之前写入的数据验证一遍 - * 8. transfer leader 到shut down 的peer 上 - * 9. 在 read 之前写入的数据验证 + *Verify the shutdown of non leader nodes on three nodes, restart, and control the recovery from install snapshot + * 1. Create a replication group of 3 replicas + * 2. Wait for the leader to generate, write the data, and update the write version to generate a chunk snapshot + * 3. Shutdown non leader + * 4. Then the sleep exceeds one snapshot interval, + * 5. Delete the chunk snapshot and write the data again with a new version to generate a new chunk snapshot + * 6. Then sleep more than one snapshot interval; 4,5 two-step + * It is to ensure that at least two snapshots are taken, so that when the node restarts again, it must pass the install snapshot + * 7. Wait for the leader to be generated, and then verify the data written before the read + * 8. Transfer leader to shut down peer + * 9. Verification of data written before read */ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { LogicPoolID logicPoolId = 2; @@ -499,7 +499,7 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 + // Initiate read/write to generate a chunk file WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -512,7 +512,7 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { LOG(INFO) << "write 1 end"; LOG(INFO) << "write 2 start"; - // 发起 read/write,产生chunk文件,并产生快照文件 + // Initiate read/write, generate chunk files, and generate snapshot files WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -521,7 +521,7 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { ++ch, // b loop, initsn+1); // sn = 2 - // 验证chunk快照数据正确性 + // Verify the correctness of chunk snapshot data ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, @@ -531,11 +531,11 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { loop); LOG(INFO) << "write 2 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // The operations between replicas within the raft are not all synchronized, and there may be outdated replica operations + // So take a nap first to prevent concurrent statistics of file information ::sleep(2); - // shutdown 某个follower + // shutdown a certain follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -546,18 +546,18 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // wait snapshot, 保证能够触发打快照 - // 
此外通过增加chunk版本号,触发chunk文件产生快照文件 + // wait snapshot to ensure that it can trigger a snapshot + // In addition, by increasing the chunk version number, trigger the chunk file to generate a snapshot file ::sleep(1.5*snapshotIntervalS_); - // 删除旧的快照 + // Delete old snapshots DeleteSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, initsn + 1); // csn = 2 - // 再次发起 read/write + // Initiate read/write again LOG(INFO) << "write 3 start"; WriteThenReadVerify(leaderPeer, logicPoolId, @@ -568,7 +568,7 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { loop, initsn + 2); // sn = 3 LOG(INFO) << "write 3 end"; - // 验证chunk快照数据正确性 + // Verify the correctness of chunk snapshot data ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, @@ -577,10 +577,10 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { ch-1, // b loop); - // wait snapshot, 保证能够触发打快照 + // wait snapshot to ensure that it can trigger a snapshot ::sleep(1.5*snapshotIntervalS_); - // restart, 需要从 install snapshot 恢复 + // restart, needs to be restored from install snapshot ASSERT_EQ(0, cluster.StartPeer(shutdownPeer, PeerCluster::PeerToId(shutdownPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -596,17 +596,17 @@ TEST_F(RaftSnapshotTest, DoCurveSnapshotAfterShutdownPeerThenRestart) { } /** - * 验证curve快照转储过程当中,chunkserver存在多个copyset情况下, - * 1. 创建3个副本的复制组 - * 2. 为每个复制组的chunkserver生成新的copyset,并作为后续操作对象 - * 3. 等待 leader 产生,write 数据 - * 4. sleep 超过一个 snapshot interval,确保产生raft快照 - * 5. 更新写版本,产生chunk快照 - * 6. 然后 sleep 超过一个 snapshot interval,确保产生raft快照 - * 7. shutdown 非 leader - * 8. AddPeer添加一个新节点使其通过加载快照恢复,然后remove掉shutdown的peer - * 9. 切换leader到新添加的peer - * 10. 等待 leader 产生,然后 read 之前产生的数据和chunk快照进行验证 + * During the process of verifying the curve snapshot dump, if there are multiple copysets in the chunkserver, + * 1. Create a replication group of 3 replicas + * 2. Generate a new copyset for each replication group's chunkserver and use it as a subsequent operation object + * 3. Wait for the leader to generate and write data + * 4. If the sleep exceeds one snapshot interval, ensure that a raft snapshot is generated + * 5. Update the write version to generate a chunk snapshot + * 6. Then the sleep exceeds one snapshot interval to ensure that a raft snapshot is generated + * 7. Shutdown non leader + * 8. Add a new node to AddPeer and restore it by loading a snapshot, then remove the shutdown peer + * 9. Switch the leader to the newly added peer + * 10. 
Wait for the leader to be generated, then read the data and chunk snapshot generated before validation */ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { LogicPoolID logicPoolId = 2; @@ -633,7 +633,7 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { ASSERT_EQ(0, cluster.StartPeer(peer2_, PeerCluster::PeerToId(peer2_))); ASSERT_EQ(0, cluster.StartPeer(peer3_, PeerCluster::PeerToId(peer3_))); - // 创建新的copyset + // Create a new copyset LOG(INFO) << "create new copyset."; ++copysetId; int ret = cluster.CreateCopyset(logicPoolId, copysetId, peer1_, peers); @@ -643,14 +643,14 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { ret = cluster.CreateCopyset(logicPoolId, copysetId, peer3_, peers); ASSERT_EQ(0, ret); - // 使用新的copyset作为操作对象 + // Use the new copyset as the operand cluster.SetWorkingCopyset(copysetId); Peer leaderPeer; ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); LOG(INFO) << "write 1 start"; - // 发起 read/write,产生chunk文件 + // Initiate read/write to generate a chunk file WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -662,11 +662,11 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { LOG(INFO) << "write 1 end"; - // wait snapshot, 保证能够触发打快照 + // Wait snapshot to ensure that it can trigger a snapshot ::sleep(1.5*snapshotIntervalS_); LOG(INFO) << "write 2 start"; - // 发起 read/write,产生chunk文件,并产生快照文件 + // Initiate read/write, generate chunk files, and generate snapshot files WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -675,7 +675,7 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { ++ch, // b loop, initsn+1); // sn = 2 - // 验证chunk快照数据正确性 + // Verify the correctness of chunk snapshot data ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, @@ -685,15 +685,15 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { loop); LOG(INFO) << "write 2 end"; - // raft内副本之间的操作并不是全部同步的,可能存在落后的副本操作 - // 所以先睡一会,防止并发统计文件信息 + // The operations between replicas within the raft are not all synchronized, and there may be outdated replica operations + // So take a nap first to prevent concurrent statistics of file information ::sleep(2); - // wait snapshot, 保证能够触发打快照 - // 通过至少两次快照,保证新加的peer通过下载快照安装 + // Wait snapshot to ensure that it can trigger a snapshot + // Ensure that the newly added peer is installed by downloading the snapshot by taking at least two snapshots ::sleep(1.5*snapshotIntervalS_); - // shutdown 某个follower + // Shutdown a certain follower Peer shutdownPeer; if (leaderPeer.address() == peer1_.address()) { shutdownPeer = peer2_; @@ -704,7 +704,7 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { LOG(INFO) << "leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(shutdownPeer)); - // 添加新的peer,并移除shutdown的peer + // Add a new peer and remove the shutdown peer Configuration conf = cluster.CopysetConf(); ASSERT_EQ(0, cluster.StartPeer(peer4_, PeerCluster::PeerToId(peer4_))); @@ -712,7 +712,7 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { AddPeer(logicPoolId, copysetId, conf, peer4_, defaultCliOpt_); ASSERT_TRUE(status.ok()); - // 删除旧leader及其目录 + // Delete old leader and its directory status = RemovePeer(logicPoolId, copysetId, conf, shutdownPeer, defaultCliOpt_); ASSERT_TRUE(status.ok()); @@ -720,10 +720,10 @@ TEST_F(RaftSnapshotTest, AddPeerWhenDoingCurveSnapshotWithMultiCopyset) { rmdir += std::to_string(PeerCluster::PeerToId(shutdownPeer)); 
::system(rmdir.c_str()); - // transfer leader 到peer4_,并读出来验证 + // Transfer leader to peer4_ and read back to verify TransferLeaderAssertSuccess(&cluster, peer4_, defaultCliOpt_); leaderPeer = peer4_; - // 读数据验证 + // Read data to verify ReadVerify(leaderPeer, logicPoolId, copysetId, chunkId, length, ch, loop); ReadSnapshotVerify(leaderPeer, logicPoolId, copysetId, chunkId, diff --git a/test/integration/raft/raft_vote_test.cpp b/test/integration/raft/raft_vote_test.cpp index 5f87a1495f..a12219177d 100644 --- a/test/integration/raft/raft_vote_test.cpp +++ b/test/integration/raft/raft_vote_test.cpp @@ -190,7 +190,7 @@ class RaftVoteTest : public testing::Test { std::map paramsIndexs; std::vector params; - // 等待多个副本数据一致的时间 + // Time to wait for data on multiple replicas to become consistent int waitMultiReplicasBecomeConsistent; }; @@ -199,12 +199,12 @@ class RaftVoteTest : public testing::Test { butil::AtExitManager atExitManager; /** - * 验证1个节点的复制组 - * 1. 创建1个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader,验证可用性 - * 3. 拉起leader - * 4. hang住leader - * 5. 恢复leader + * Verify a replication group with 1 node + * 1. Create a replication group of 1 member, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the leader and verify availability + * 3. Pull up the leader + * 4. Hang the leader + * 5. Restore leader */ TEST_F(RaftVoteTest, OneNode) { LogicPoolID logicPoolId = 2; @@ -214,7 +214,7 @@ TEST_F(RaftVoteTest, OneNode) { char ch = 'a'; int loop = 25; - // 1. 启动一个成员的复制组 + // 1. Start a replication group of 1 member PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -242,7 +242,7 @@ TEST_F(RaftVoteTest, OneNode) { ch++, loop); - // 2. 挂掉这个节点 + // 2. Hang up this node ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -252,12 +252,12 @@ TEST_F(RaftVoteTest, OneNode) { ch - 1, 1); - // 3. 将节点拉起来 + // 3. Pull up the node ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -274,7 +274,7 @@ TEST_F(RaftVoteTest, OneNode) { ch++, loop); - // 4. hang住此节点 + // 4. Hang this node ASSERT_EQ(0, cluster.HangPeer(peer1)); ::usleep(200 * 1000); ReadVerifyNotAvailable(leaderPeer, @@ -285,12 +285,12 @@ TEST_F(RaftVoteTest, OneNode) { ch - 1, 1); - // 5. 恢复节点 + // 5. Restore the node ASSERT_EQ(0, cluster.SignalPeer(peer1)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -309,10 +309,10 @@ TEST_F(RaftVoteTest, OneNode) { } /** - * 验证2个节点的复制组,并挂掉leader - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 恢复leader + * Verify the replication groups of two nodes and hang the leader + * 1. Create a replication group of 2 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the leader + * 3. Restore leader */ TEST_F(RaftVoteTest, TwoNodeKillLeader) { LogicPoolID logicPoolId = 2; @@ -322,7 +322,7 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. 
Start a replication group of 2 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -351,7 +351,7 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { ch++, loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -361,12 +361,12 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { ch - 1, 1); - // 3. 拉起leader + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -388,10 +388,10 @@ TEST_F(RaftVoteTest, TwoNodeKillLeader) { } /** - * 验证2个节点的复制组,并挂掉follower - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of two nodes and hang the follower + * 1. Create a replication group of 2 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, TwoNodeKillFollower) { LogicPoolID logicPoolId = 2; @@ -401,7 +401,7 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members LOG(INFO) << "init 2 members copyset"; PeerId leaderId; Peer leaderPeer; @@ -431,7 +431,7 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { ch++, loop); - // 2. 挂掉follower + // 2. Hang up the follower Peer followerPeer; if (leaderPeer.address() == peer1.address()) { followerPeer = peer2; @@ -441,7 +441,7 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { LOG(INFO) << "kill follower " << followerPeer.address(); ASSERT_EQ(0, cluster.ShutdownPeer(followerPeer)); LOG(INFO) << "fill ch: " << std::to_string(ch - 1); - // step down之前的request,最终会被提交 + // The request before the step down will eventually be submitted WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, @@ -449,7 +449,7 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { length, ch, 1); - // 等待leader step down,之后,也不支持read了 + // Wait for the leader step to down, and after that, read is no longer supported ::usleep(1000 * electionTimeoutMs * 2); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -459,14 +459,14 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { ch, 1); - // 3. 拉起follower + // 3. Pull up the follower LOG(INFO) << "restart follower " << followerPeer.address(); ASSERT_EQ(0, cluster.StartPeer(followerPeer, PeerCluster::PeerToId(followerPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证,step down之前的write + // Verify the data written before read, and write before step down ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -488,10 +488,10 @@ TEST_F(RaftVoteTest, TwoNodeKillFollower) { } /** - * 验证2个节点的复制组,并hang leader - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 恢复leader + * Verify the replication group of 2 nodes and hang the leader + * 1. Create a replication group of 2 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang leader + * 3. Restore leader */ TEST_F(RaftVoteTest, TwoNodeHangLeader) { LogicPoolID logicPoolId = 2; @@ -501,7 +501,7 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. 
Start a replication group of 2 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -541,12 +541,12 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) { ch - 1, 1); - // 3. 恢复leader + // 3. Restore leader LOG(INFO) << "recover leader peer: " << leaderPeer.address(); ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -568,10 +568,10 @@ TEST_F(RaftVoteTest, TwoNodeHangLeader) { } /** - * 验证2个节点的复制组,并发Hang一个follower - * 1. 创建2个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang follower - * 3. 恢复follower + * Verify the replication group of two nodes and hang a follower + * 1. Create a replication group of 2 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang follower + * 3. Restore follower */ TEST_F(RaftVoteTest, TwoNodeHangFollower) { LogicPoolID logicPoolId = 2; @@ -581,7 +581,7 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { char ch = 'a'; int loop = 25; - // 1. 启动2个成员的复制组 + // 1. Start a replication group of 2 members LOG(INFO) << "init 2 members copyset"; PeerId leaderId; Peer leaderPeer; @@ -621,7 +621,7 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { LOG(INFO) << "hang follower " << followerPeer.address(); ASSERT_EQ(0, cluster.HangPeer(followerPeer)); LOG(INFO) << "fill ch: " << std::to_string(ch - 1); - // step down之前的request,最终会被提交 + // The request before the step down will eventually be submitted WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, @@ -629,7 +629,7 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { length, ch, 1); - // 等待leader step down之后,也不支持read了 + // After the leader steps down, read is no longer supported either ::usleep(1000 * electionTimeoutMs * 2); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -639,12 +639,12 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { ch, 1); - // 3. 恢复follower + // 3. Restore follower LOG(INFO) << "recover follower " << followerPeer.address(); ASSERT_EQ(0, cluster.SignalPeer(followerPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证,step down之前的write + // Verify the data written before read, and write before step down ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -666,8 +666,8 @@ TEST_F(RaftVoteTest, TwoNodeHangFollower) { } /** - * 验证3个节点是否能够正常提供服务 - * 1. 创建3个副本的复制组,等待leader产生,write数据,然后read出来验证一遍 + * Verify whether the three nodes can provide services normally + * 1. Create a replication group of three replicas, wait for the leader to generate, write the data, and then read it out for verification */ TEST_F(RaftVoteTest, ThreeNodesNormal) { LogicPoolID logicPoolId = 2; @@ -700,7 +700,7 @@ TEST_F(RaftVoteTest, ThreeNodesNormal) { PeerId leaderId; ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // 再次发起 read/write + // Initiate read/write again WriteThenReadVerify(leaderPeer, logicPoolId, copysetId, @@ -714,10 +714,10 @@ TEST_F(RaftVoteTest, ThreeNodesNormal) { } /** - * 验证3个节点的复制组,并挂掉leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader - * 3. 恢复leader + * Verify the replication groups of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the leader + * 3. 
Restore leader */ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { LogicPoolID logicPoolId = 2; @@ -727,7 +727,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -758,7 +758,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { ch++, loop); - // 2. 挂掉leader + // 2. Hang up the leader ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -768,12 +768,12 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { ch - 1, 1); - // 3. 拉起leader + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -795,10 +795,10 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeader) { } /** - * 验证3个节点的复制组,并挂掉follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of three nodes and hang the follower + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { LogicPoolID logicPoolId = 2; @@ -808,7 +808,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -839,7 +839,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { ch++, loop); - // 2. 挂掉1个follower + // 2. Hang up 1 follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); @@ -852,12 +852,12 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { ch++, loop); - // 3. 拉起follower + // 3. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -879,9 +879,9 @@ TEST_F(RaftVoteTest, ThreeNodeKillOneFollower) { } /** - * 验证3个节点的复制组,反复restart leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 反复restart leader + * Verify the replication group of three nodes and repeatedly restart the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Repeated restart leader */ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { LogicPoolID logicPoolId = 2; @@ -891,7 +891,7 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -938,7 +938,7 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -961,9 +961,9 @@ TEST_F(RaftVoteTest, ThreeNodeRestartLeader) { } /** - * 验证3个节点的复制组,反复重启一个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 
反复重启follower + * Verify the replication groups of three nodes and restart a follower repeatedly + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Repeatedly restarting the follower */ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { LogicPoolID logicPoolId = 2; @@ -973,7 +973,7 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1004,7 +1004,7 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { ch++, loop); - // 2. 反复 restart follower + // 2. Repeatedly restart follower for (int i = 0; i < 5; ++i) { std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1028,11 +1028,11 @@ TEST_F(RaftVoteTest, ThreeNodeRestartFollower) { } /** - * 验证3个节点的复制组,并挂掉leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉leader和1个follwoer - * 3. 拉起leader - * 4. 拉起follower + * Verify the replication groups of three nodes and hang the leader and one follower + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the leader and 1 follower + * 3. Pull up the leader + * 4. Pull up the follower */ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1042,7 +1042,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1073,7 +1073,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { ch++, loop); - // 2. 挂掉leader和Follower + // 2. Hang up the leader and follower ASSERT_EQ(0, cluster.ShutdownPeer(leaderPeer)); std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); @@ -1087,12 +1087,12 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { ch - 1, 1); - // 3. 拉起leader + // 3. Pull up the leader ASSERT_EQ(0, cluster.StartPeer(leaderPeer, PeerCluster::PeerToId(leaderPeer))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1109,7 +1109,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1126,11 +1126,11 @@ TEST_F(RaftVoteTest, ThreeNodeKillLeaderAndOneFollower) { } /** - * 验证3个节点的复制组,并挂掉2个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉2个follower - * 3. 拉起1个follower - * 4. 拉起1个follower + * Verify the replication groups of three nodes and hang two followers + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up 2 followers + * 3. Pull up 1 follower + * 4. Pull up 1 follower */ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1140,7 +1140,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1171,7 +1171,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { ch++, loop); - // 2. 挂掉2个Follower + // 2. Hang 2 Followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); @@ -1185,12 +1185,12 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { ch - 1, 1); - // 3. 拉起1个follower + // 3. Pull up 1 follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[0], PeerCluster::PeerToId(followerPeers[0]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1207,7 +1207,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { ch++, loop); - // 4. 拉起follower + // 4. Pull up the follower ASSERT_EQ(0, cluster.StartPeer(followerPeers[1], PeerCluster::PeerToId(followerPeers[1]))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1224,12 +1224,12 @@ TEST_F(RaftVoteTest, ThreeNodeKillTwoFollower) { } /** - * 验证3个节点的复制组,并挂掉3个成员 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉3个成员 - * 3. 拉起1个成员 - * 4. 拉起1个成员 - * 5. 拉起1个成员 + * Verify the replication group of 3 nodes and suspend 3 members + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang 3 members + * 3. Pull up 1 member + * 4. Pull up 1 member + * 5. Pull up 1 member */ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { LogicPoolID logicPoolId = 2; @@ -1239,7 +1239,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1270,7 +1270,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.ShutdownPeer(peer1)); ASSERT_EQ(0, cluster.ShutdownPeer(peer2)); @@ -1283,7 +1283,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { ch - 1, 1); - // 3. 拉起1个成员 + // 3. Pull up 1 member ASSERT_EQ(0, cluster.StartPeer(peer1, PeerCluster::PeerToId(peer1))); @@ -1297,7 +1297,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { 1); - // 4. 拉起1个成员 + // 4. Pull up 1 member ASSERT_EQ(0, cluster.StartPeer(peer2, PeerCluster::PeerToId(peer2))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1311,7 +1311,7 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { ch++, loop); - // 5. 再拉起1个成员 + // 5. Pull up one more member ASSERT_EQ(0, cluster.StartPeer(peer3, PeerCluster::PeerToId(peer3))); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); @@ -1332,10 +1332,10 @@ TEST_F(RaftVoteTest, ThreeNodeKillThreeMember) { /** - * 验证3个节点的复制组,并hang leader - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader - * 3. 恢复leader + * Verify the replication groups of three nodes and hang the leader + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang leader + * 3. Restore leader */ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { LogicPoolID logicPoolId = 2; @@ -1345,7 +1345,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. 
Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1387,7 +1387,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { ch - 1, 1); - // 等待new leader产生 + // Waiting for new leader generation ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); WriteThenReadVerify(leaderPeer, @@ -1398,9 +1398,9 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { ch++, loop); - // 3. 恢复 old leader + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(oldPeer)); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1423,10 +1423,10 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeader) { /** - * 验证3个节点的复制组,并hang1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. 挂掉follower - * 3. 恢复follower + * Verify the replication groups of 3 nodes and hang 1 follower + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang up the follower + * 3. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { LogicPoolID logicPoolId = 2; @@ -1436,7 +1436,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1467,7 +1467,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { ch++, loop); - // 2. hang 1个follower + // 2. Hang 1 follower std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 1); @@ -1480,9 +1480,9 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { ch++, loop); - // 3. 恢复follower + // 3. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1504,11 +1504,11 @@ TEST_F(RaftVoteTest, ThreeNodeHangOneFollower) { } /** - * 验证3个节点的复制组,并hang leader和1个follower - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang leader和1个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of three nodes and hang the leader and one follower + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang leader and 1 follower + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { LogicPoolID logicPoolId = 2; @@ -1518,7 +1518,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1563,11 +1563,11 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { ch - 1, 1); - // 3. 恢复 old leader + // 3. Restore old leader ASSERT_EQ(0, cluster.SignalPeer(leaderPeer)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); - // read之前写入的数据验证 + // Verification of data written before read ReadVerify(leaderPeer, logicPoolId, copysetId, @@ -1584,7 +1584,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { ch++, loop); - // 4. 恢复follower + // 4. Restore follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); ReadVerify(leaderPeer, logicPoolId, @@ -1607,11 +1607,11 @@ TEST_F(RaftVoteTest, ThreeNodeHangLeaderAndOneFollower) { } /** - * 验证3个节点的复制组,并hang 2个follower - * 1. 
创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang两个follower - * 3. 恢复old leader - * 4. 恢复follower + * Verify the replication groups of 3 nodes and hang 2 followers + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang two followers + * 3. Restore old leader + * 4. Restore follower */ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { LogicPoolID logicPoolId = 2; @@ -1621,7 +1621,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1652,13 +1652,13 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { ch++, loop); - // 2. hang 2个follower + // 2. Hang 2 followers std::vector followerPeers; PeerCluster::GetFollwerPeers(peers, leaderPeer, &followerPeers); ASSERT_GE(followerPeers.size(), 2); ASSERT_EQ(0, cluster.HangPeer(followerPeers[0])); ASSERT_EQ(0, cluster.HangPeer(followerPeers[1])); - // step down之前提交request会超时 + // Submitting a request before the step down will timeout WriteVerifyNotAvailable(leaderPeer, logicPoolId, copysetId, @@ -1667,7 +1667,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { ch ++, 1); - // 等待step down之后,读也不可提供服务 + // After waiting for the step down, reading is not available for service ::usleep(1000 * electionTimeoutMs * 2); ReadVerifyNotAvailable(leaderPeer, logicPoolId, @@ -1677,7 +1677,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { ch - 1, 1); - // 3. 恢复1个follower + // 3. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[0])); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); @@ -1698,7 +1698,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { ch++, loop); - // 4. 恢复1个follower + // 4. Restore 1 follower ASSERT_EQ(0, cluster.SignalPeer(followerPeers[1])); ReadVerify(leaderPeer, logicPoolId, @@ -1721,12 +1721,12 @@ TEST_F(RaftVoteTest, ThreeNodeHangTwoFollower) { } /** - * 验证3个节点的复制组,并hang 3个成员 - * 1. 创建3个成员的复制组,等待leader产生,write数据,然后read出来验证一遍 - * 2. hang 3个成员 - * 3. 恢复1个成员 - * 4. 恢复1个成员 - * 5. 恢复1个成员 + * Verify the replication group of 3 nodes and hang 3 members + * 1. Create a replication group of 3 members, wait for the leader to generate, write the data, and then read it out for verification + * 2. Hang 3 members + * 3. Restore 1 member + * 4. Restore 1 member + * 5. Restore 1 member */ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { LogicPoolID logicPoolId = 2; @@ -1736,7 +1736,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { char ch = 'a'; int loop = 25; - // 1. 启动3个成员的复制组 + // 1. Start a replication group of 3 members PeerId leaderId; Peer leaderPeer; std::vector peers; @@ -1767,7 +1767,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { ch++, loop); - // 2. 挂掉3个成员 + // 2. Hang 3 members std::vector followerPeers; ASSERT_EQ(0, cluster.HangPeer(peer1)); ASSERT_EQ(0, cluster.HangPeer(peer2)); @@ -1787,7 +1787,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { ch - 1, 1); - // 3. 恢复1个成员 + // 3. Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer1)); ::usleep(1000 * electionTimeoutMs * 2); ReadVerifyNotAvailable(leaderPeer, @@ -1799,7 +1799,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { 1); - // 4. 恢复1个成员 + // 4. 
Restore 1 member ASSERT_EQ(0, cluster.SignalPeer(peer2)); ASSERT_EQ(0, cluster.WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); @@ -1820,7 +1820,7 @@ TEST_F(RaftVoteTest, ThreeNodeHangThreeMember) { ch++, loop); - // 5. 再恢复1个成员 + // 5. Restore 1 more member ASSERT_EQ(0, cluster.SignalPeer(peer3)); WriteThenReadVerify(leaderPeer, diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp index 49191fdd40..92f54e7830 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.cpp +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.cpp @@ -38,7 +38,7 @@ const uint64_t fileLength = 64ULL * 1024 * 1024; int FakeCurveFsClient::Init(const CurveClientOptions &options) { - // 初始化一个文件用打快照和克隆 + // Initialize a file for snapshot and cloning FInfo fileInfo; fileInfo.id = 100; fileInfo.parentid = 3; @@ -125,7 +125,7 @@ int FakeCurveFsClient::GetSnapshotSegmentInfo(const std::string &filename, segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { @@ -295,7 +295,7 @@ int FakeCurveFsClient::GetOrAllocateSegmentInfo( segInfo->segmentsize = segmentSize; segInfo->chunksize = chunkSize; segInfo->startoffset = offset; - // 一共2个segment + // 2 segments in total if (offset == 0) { segInfo->chunkvec = {{1, 1, 1}, {2, 2, 1}}; } else { diff --git a/test/integration/snapshotcloneserver/fake_curvefs_client.h b/test/integration/snapshotcloneserver/fake_curvefs_client.h index 0f3a0a6107..8ac776de42 100644 --- a/test/integration/snapshotcloneserver/fake_curvefs_client.h +++ b/test/integration/snapshotcloneserver/fake_curvefs_client.h @@ -155,10 +155,10 @@ class FakeCurveFsClient : public CurveFsClient { const std::string& newOwner) override; /** - * @brief 判断/clone目录下是否存在临时文件 + * @brief Check if there are temporary files under the /clone directory. * - * @retval true 存在 - * @retval false 不存在 + * @retval true If they exist. + * @retval false If they do not exist. */ bool JudgeCloneDirHasFile(); @@ -169,11 +169,11 @@ class FakeCurveFsClient : public CurveFsClient { // fileName -> snapshot fileInfo std::map fileSnapInfoMap_; - // inodeid 从101开始,100以内预留 - // 快照所属文件Id一律为100, parentid = 99 - // "/" 目录的Id为1 - // "/clone" 目录的Id为2 - // "/user1" 目录的Id为3 + // Inode IDs start from 101, with numbers under 100 reserved. + // Snapshot file IDs are always 100, with a parentid = 99. + // The ID for the "/" directory is 1. + // The ID for the "/clone" directory is 2. + // The ID for the "/user1" directory is 3. 
std::atomic fileId_; }; diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp index 8eff45065c..e27b93ef36 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_common_test.cpp @@ -240,13 +240,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ "--name=" + kTestPrefix }); @@ -259,13 +259,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -302,7 +302,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -321,7 +321,7 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool and sleep for a period of time to let the underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/config/" @@ -395,7 +395,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << testfd1_; return false; } - // 每个chunk写前面4k数据, 写两个segment + // Write the first 4k data and two segments for each chunk uint64_t totalChunk = 2ULL * segmentSize / chunkSize; for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { ret = @@ -495,23 +495,23 @@ class SnapshotCloneServerTest : public ::testing::Test { CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; -// 常规测试用例 -// 场景一:快照增加删除查找 +// Regular test cases +// Scenario 1: Adding, deleting, and searching snapshots TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) { std::string uuid1; int ret = 0; - // 操作1:用户testUser1_对不存在的文件打快照 - // 预期1:返回文件不存在 + // Step1: User testUser1_ Take a snapshot of non-existent files + // Expected 1: Return file does not exist ret = MakeSnapshot(testUser1_, "/ItUser1/notExistFile", "snap1", &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:用户testUser2_对testFile1_打快照 - // 预期2:返回用户认证失败 + // Step2: User testUser2_ For testFile1_ Take a snapshot + // Expected 2: Failed to return user authentication ret = MakeSnapshot(testUser2_, testFile1_, "snap1", &uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:用户testUser1_对testFile1_打快照snap1。 - // 预期3:打快照成功 + // Step3: User testUser1_ For testFile1_ Take a snapshot snap1. 
+ // Expected 3: Snapshot taken successfully ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); ASSERT_EQ(0, ret); @@ -519,56 +519,56 @@ TEST_F(SnapshotCloneServerTest, TestSnapshotAddDeleteGet) { ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData)); ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - // 操作4: 获取快照信息,user=testUser1_,filename=testFile1_ - // 预期4:返回快照snap1的信息 + // Step4: Obtain snapshot information, user=testUser1_, filename=testFile1_ + // Expected 4: Return information for snapshot snap1 bool success1 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid1); ASSERT_TRUE(success1); - // 操作5:获取快照信息,user=testUser2_,filename=testFile1_ - // 预期5:返回用户认证失败 + // Step5: Obtain snapshot information, user=testUser2_, filename=testFile1_ + // Expected 5: User authentication failure returned FileSnapshotInfo info1; ret = GetSnapshotInfo(testUser2_, testFile1_, uuid1, &info1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作6:获取快照信息,user=testUser2_,filename=testFile2_ - // 预期6:返回空 + // Step6: Obtain snapshot information, user=testUser2_, filename=testFile2_ + // Expected 6: Return empty std::vector infoVec; ret = ListFileSnapshotInfo(testUser2_, testFile2_, 10, 0, &infoVec); ASSERT_EQ(0, ret); ASSERT_EQ(0, infoVec.size()); - // 操作7:testUser2_删除快照snap1 - // 预期7:返回用户认证失败 + // Step7: testUser2_ Delete snapshot snap1 + // Expected 7: User authentication failure returned ret = DeleteSnapshot(testUser2_, testFile1_, uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作8:testUser1_删除testFile2_的快照,ID为snap1 - // 预期8:返回文件名不匹配 + // Step8: testUser1_ deletes the snapshot of testFile2_ with ID snap1 + // Expected 8: Return file name mismatch ret = DeleteSnapshot(testUser1_, testFile2_, uuid1); ASSERT_EQ(kErrCodeFileNameNotMatch, ret); - // 操作9:testUser1_删除快照snap1 - // 预期9:返回删除成功 + // Step9: testUser1_ Delete snapshot snap1 + // Expected 9: Successful deletion returned ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); ASSERT_EQ(0, ret); - // 操作10:获取快照信息,user=testUser1_,filename=testFile1_ - // 预期10:返回空 + // Step10: Obtain snapshot information, user=testUser1_, filename=testFile1_ + // Expected 10: Return empty ret = ListFileSnapshotInfo(testUser1_, testFile1_, 10, 0, &infoVec); ASSERT_EQ(0, ret); ASSERT_EQ(0, infoVec.size()); - // 操作11:testUser1_删除快照snap1(重复删除) - // 预期11:返回删除成功 + // Step11: testUser1_ Delete snapshot snap1 (duplicate deletion) + // Expected 11: Successful deletion returned ret = DeleteAndCheckSnapshotSuccess(testUser1_, testFile1_, uuid1); ASSERT_EQ(0, ret); - // 复原testFile1_ + // Restore testFile1_ std::string fakeData2(4096, 'x'); ASSERT_TRUE(WriteFile(testFile1_, testUser1_, fakeData2)); } -// 场景二:取消快照 +// Scenario 2: Cancel Snapshot TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snapToCancle", &uuid1); @@ -583,29 +583,29 @@ TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { if (info1.GetSnapshotInfo().GetStatus() == Status::pending || info1.GetSnapshotInfo().GetStatus() == Status::canceling) { if (!isCancel) { - // 操作1:用户testUser1_对testFile1_打快照snap1, - // 在快照未完成前testUser2_取消testFile1_的快照snap1 - // 预期1:取消用户认证失败 + // Step1: User testUser1_ takes a snapshot snap1 of testFile1_, + // and testUser2_ cancels snapshot snap1 of testFile1_ before it completes + // Expected 1: Cancel returns user authentication failure int retCode = CancelSnapshot(testUser2_, testFile1_, uuid1); ASSERT_EQ(kErrCodeInvalidUser, retCode); - // 操作2:用户testUser1_对testFile1_打快照snap1, - // 
在快照未完成前testUser1_取消testFile1_ - 的不存在的快照 - // 预期2:返回kErrCodeCannotCancelFinished + // Step2: User testUser1_ takes a snapshot snap1 of testFile1_, + // and before the snapshot completes testUser1_ cancels a + // non-existent snapshot of testFile1_ + // Expected 2: Return kErrCodeCannotCancelFinished retCode = CancelSnapshot(testUser1_, testFile1_, "notExistUUId"); ASSERT_EQ(kErrCodeCannotCancelFinished, retCode); - // 操作3:用户testUser1_对testFile1_打快照snap1, - // 在快照未完成前testUser1_取消testFile2_的快照snap1 - // 预期3: 返回文件名不匹配 + // Step3: User testUser1_ takes a snapshot snap1 of testFile1_, + // and before the snapshot completes testUser1_ cancels snapshot snap1 of testFile2_ + // Expected 3: Return file name mismatch retCode = CancelSnapshot(testUser1_, testFile2_, uuid1); ASSERT_EQ(kErrCodeFileNameNotMatch, retCode); - // 操作4:用户testUser1_对testFile1_打快照, - // 在快照未完成前testUser1_取消快照snap1 - // 预期4:取消快照成功 + // Step4: User testUser1_ takes a snapshot of testFile1_, + // and before the snapshot completes testUser1_ cancels snapshot snap1 + // Expected 4: Snapshot cancelled successfully retCode = CancelSnapshot(testUser1_, testFile1_, uuid1); ASSERT_EQ(0, retCode); isCancel = true; @@ -620,47 +620,47 @@ TEST_F(SnapshotCloneServerTest, TestCancelSnapshot) { << static_cast(info1.GetSnapshotInfo().GetStatus()); } } else if (retCode == -8) { - // 操作5:获取快照信息,user=testUser1_,filename=testFile1_ - // 预期5:返回空 + // Step5: Obtain snapshot information, user=testUser1_, filename=testFile1_ + // Expected 5: Return empty success1 = true; break; } } ASSERT_TRUE(success1); - // 操作6: 在快照已完成后,testUser1_取消testFile1_的快照snap1 - // 预期6: 返回待取消的快照不存在或已完成 + // Step6: After the snapshot has completed, testUser1_ cancels snapshot snap1 of testFile1_ + // Expected 6: Return that the snapshot to cancel does not exist or has already completed ret = CancelSnapshot(testUser1_, testFile1_, uuid1); ASSERT_EQ(kErrCodeCannotCancelFinished, ret); } -// 场景三:lazy快照克隆场景 +// Scenario 3: Lazy snapshot clone scenario TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); - // 操作1: testUser1_ clone不存在的快照,fileName=SnapLazyClone1 - // 预期1:返回快照不存在 + // Step1: testUser1_ clones a snapshot that does not exist, fileName=SnapLazyClone1 + // Expected 1: Return snapshot does not exist std::string uuid1, uuid2, uuid3, uuid4, uuid5; int ret; ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId1", "/ItUser1/SnapLazyClone1", true, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser2_ clone快照snap1,fileName=SnapLazyClone1 - // 预期2: 返回用户认证失败 + // Step2: testUser2_ Clone snapshot snap1, fileName=SnapLazyClone1 + // Expected 2: User authentication failure returned ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapLazyClone1", true, &uuid2); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:testUser1_ clone 快照snap1,fileName=SnapLazyClone1 - // 预期3 返回克隆成功 + // Step3: testUser1_ Clone snapshot snap1, fileName=SnapLazyClone1 + // Expected 3 to return successful cloning std::string dstFile = "/ItUser1/SnapLazyClone1"; ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid3); ASSERT_EQ(0, ret); - // 操作4: testUser1_ clone 块照snap1,fileName=SnapLazyClone1 (重复克隆) - // 预期4:返回克隆成功(幂等) + // Step4: testUser1_ Clone snapshot snap1, fileName=SnapLazyClone1 (duplicate clone) + // Expected 4: Returns successful cloning (idempotent) ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapLazyClone1", true, &uuid4); ASSERT_EQ(0, ret); @@ -669,68 +669,68 @@ 
TEST_F(SnapshotCloneServerTest, TestSnapLazyClone) { ret = Flatten(testUser1_, uuid3); ASSERT_EQ(0, ret); - // 操作5: testUser1_ GetCloneTask - // 预期5:返回SnapLazyClone1的clone 任务 + // Step5: testUser1_ GetCloneTask + // Expected 5: Return clone task for SnapLazyClone1 bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid3, true); ASSERT_TRUE(success1); - // 操作6: testUser2_ GetCloneTask - // 预期6: 返回空 + // Step6: testUser2_ GetCloneTask + // Expected 6: Return empty std::vector infoVec; ret = ListCloneTaskInfo(testUser2_, 10, 0, &infoVec); ASSERT_EQ(0, ret); ASSERT_EQ(0, infoVec.size()); - // 操作7: testUser2_ CleanCloneTask UUID为SnapLazyClone1的UUID - // 预期7:返回用户认证失败 + // Step7: testUser2_ CleanCloneTask UUID is the UUID of SnapLazyClone1 + // Expected 7: User authentication failure returned ret = CleanCloneTask(testUser2_, uuid3); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作8: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID - // 预期8:返回执行成功 + // Step8: testUser1_ CleanCloneTask UUID is the UUID of SnapLazyClone1 + // Expected 8: Return execution successful ret = CleanCloneTask(testUser1_, uuid3); ASSERT_EQ(0, ret); - // 等待清理完成 + // Waiting for cleaning to complete std::this_thread::sleep_for(std::chrono::seconds(3)); - // 操作9: testUser1_ CleanCloneTask UUID为SnapLazyClone1的UUID(重复执行) - // 预期9:返回执行成功 + // Step9: testUser1_ CleanCloneTask UUID is the UUID of SnapLazyClone1 (repeated execution) + // Expected 9: Return execution successful ret = CleanCloneTask(testUser1_, uuid3); ASSERT_EQ(0, ret); - // 操作10:testUser1_ GetCloneTask - // 预期10:返回空 + // Step10: testUser1_ GetCloneTask + // Expected 10: Return empty TaskCloneInfo info; ret = GetCloneTaskInfo(testUser1_, uuid3, &info); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 验证数据正确性 + // Verify data correctness std::string fakeData(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 场景四:非lazy快照克隆场景 +// Scenario 4: Non lazy snapshot clone scenario TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); - // 操作1: testUser1_ clone不存在的快照,fileName=SnapNotLazyClone1 - // 预期1:返回快照不存在 + // Step1: testUser1_ clones a snapshot that does not exist, fileName=SnapNotLazyClone1 + // Expected 1: Return snapshot does not exist std::string uuid1; int ret; ret = CloneOrRecover("Clone", testUser1_, "UnExistSnapId2", "/ItUser1/SnapNotLazyClone1", false, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser2_ clone快照snap1,fileName=SnapNotLazyClone1 - // 预期2: 返回用户认证失败 + // Step2: testUser2_ Clone snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 2: User authentication failure returned ret = CloneOrRecover("Clone", testUser2_, snapId, "/ItUser2/SnapNotLazyClone1", false, &uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:testUser1_ clone 快照snap1,fileName=SnapNotLazyClone1 - // 预期3 返回克隆成功 + // Step3: testUser1_ Clone snapshot snap1, fileName=SnapNotLazyClone1 + // Expected 3 to return successful cloning std::string dstFile = "/ItUser1/SnapNotLazyClone1"; ret = CloneOrRecover("Clone", testUser1_, snapId, dstFile, false, &uuid1); ASSERT_EQ(0, ret); @@ -738,39 +738,39 @@ TEST_F(SnapshotCloneServerTest, TestSnapNotLazyClone) { bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); ASSERT_TRUE(success1); - // 操作4: testUser1_ clone 块照snap1, - // fileName=SnapNotLazyClone1 (重复克隆) - // 预期4:返回克隆成功(幂等) + // Step4: testUser1_ Clone snapshot snap1, + // fileName=SnapNotLazyClone1 (duplicate clone) + // Expected 4: Returns successful cloning (idempotent) 
ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser1/SnapNotLazyClone1", false, &uuid1); ASSERT_EQ(0, ret); - // 验证数据正确性 + // Verify data correctness std::string fakeData(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 场景五:lazy快照恢复场景 +// Scenario 5: Lazy snapshot recovery scenario TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); - // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_ - // 预期1:返回快照不存在 + // Step1: testUser1_ Recover snapshot that does not exist, fileName=testFile1_ + // Expected 1: Return snapshot does not exist std::string uuid1; int ret; ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId3", testFile1_, true, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_ - // 预期2: 返回用户认证失败 + // Step2: testUser2_ Recover snapshot snap1, fileName=testFile1_ + // Expected 2: User authentication failure returned ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, true, &uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_ - // 预期3 返回恢复成功 + // Step3: testUser1_ Recover snapshot snap1, fileName=testFile1_ + // Expected 3 return recovery success ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, true, &uuid1); ASSERT_EQ(0, ret); @@ -782,38 +782,38 @@ TEST_F(SnapshotCloneServerTest, TestSnapLazyRecover) { bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); ASSERT_TRUE(success1); - // 验证数据正确性 + // Verify data correctness std::string fakeData(4096, 'x'); ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件 - // 预期4: 返回目标文件不存在 + // Step4: testUser1_ Recover snapshot snap1, target file is a non-existent file + // Expected 4: Return target file does not exist ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", true, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); } -// 场景六:非lazy快照恢复场景 +// Scenario 6: Non lazy snapshot recovery scenario TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); - // 操作1: testUser1_ Recover不存在的快照,fileName=testFile1_ - // 预期1:返回快照不存在 + // Step1: testUser1_ Recover snapshot that does not exist, fileName=testFile1_ + // Expected 1: Return snapshot does not exist std::string uuid1; int ret; ret = CloneOrRecover("Recover", testUser1_, "UnExistSnapId4", testFile1_, false, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser2_ Recover快照snap1,fileName=testFile1_ - // 预期2: 返回用户认证失败 + // Step2: testUser2_ Recover snapshot snap1, fileName=testFile1_ + // Expected 2: User authentication failure returned ret = CloneOrRecover("Recover", testUser2_, snapId, testFile1_, false, &uuid1); ASSERT_EQ(kErrCodeInvalidUser, ret); - // 操作3:testUser1_ Recover快照snap1,fileName=testFile1_ - // 预期3 返回恢复成功 + // Step3: testUser1_ Recover snapshot snap1, fileName=testFile1_ + // Expected 3 return recovery success ret = CloneOrRecover("Recover", testUser1_, snapId, testFile1_, false, &uuid1); ASSERT_EQ(0, ret); @@ -821,43 +821,43 @@ TEST_F(SnapshotCloneServerTest, TestSnapNotLazyRecover) { bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); ASSERT_TRUE(success1); - // 验证数据正确性 + // Verify data correctness std::string fakeData(4096, 'x'); ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); - // 操作4:testUser1_ recover 快照snap1,目标文件为不存在的文件 - // 预期4: 返回目标文件不存在 + // Step4: testUser1_ 
Recover snapshot snap1, target file is a non-existent file + // Expected 4: Return target file does not exist ret = CloneOrRecover("Recover", testUser1_, snapId, "/ItUser1/notExistFile", false, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); } -// 场景七: lazy镜像克隆场景 +// Scenario 7: Lazy image clone scenario TEST_F(SnapshotCloneServerTest, TestImageLazyClone) { - // 操作1: testUser1_ clone不存在的镜像,fileName=ImageLazyClone1 - // 预期1:返回文件不存在 + // Step1: testUser1_ clones an image that does not exist, fileName=ImageLazyClone1 + // Expected 1: Return file does not exist std::string uuid1, uuid2, uuid3, uuid4; int ret; ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", "/ItUser1/ImageLazyClone1", true, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageLazyClone1 - // 预期2 返回克隆成功 + // Step2: testUser1_ Clone image testFile1_, fileName=ImageLazyClone1 + // Expected 2 to return successful cloning std::string dstFile = "/ItUser1/ImageLazyClone1"; ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid2); ASSERT_EQ(0, ret); - // 操作3: testUser1_ clone 镜像testFile1_, - // fileName=ImageLazyClone1 (重复克隆) - // 预期3:返回克隆成功(幂等) + // Step3: testUser1_ Clone image testFile1_, + // fileName=ImageLazyClone1 (duplicate clone) + // Expected 3: Returns successful cloning (idempotent) ret = CloneOrRecover("Clone", testUser1_, testFile1_, "/ItUser1/ImageLazyClone1", true, &uuid3); ASSERT_EQ(0, ret); - // 操作4:对未完成lazy克隆的文件ImageLazyClone1打快照snap1 - // 预期4:返回文件状态异常 + // Step4: Take a snapshot snap1 of the file ImageLazyClone1 that has not completed the lazy clone + // Expected 4: Abnormal file status returned ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid4); ASSERT_EQ(kErrCodeFileStatusInvalid, ret); FileSnapshotInfo info2; @@ -866,7 +866,7 @@ TEST_F(SnapshotCloneServerTest, TestImageLazyClone) { ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid2, true)); - // Flatten之前验证数据正确性 + // Verify data correctness before Flatten std::string fakeData1(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1)); @@ -877,23 +877,23 @@ TEST_F(SnapshotCloneServerTest, TestImageLazyClone) { bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid2, true); ASSERT_TRUE(success1); - // Flatten之后验证数据正确性 + // Verify data correctness after Flatten std::string fakeData2(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2)); } -// 场景八:非lazy镜像克隆场景 +// Scenario 8: Non lazy image clone scenario TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) { - // 操作1: testUser1_ clone不存在的镜像,fileName=ImageNotLazyClone1 - // 预期1:返回快照不存在 + // Step1: testUser1_ clones an image that does not exist, fileName=ImageNotLazyClone1 + // Expected 1: Return snapshot does not exist std::string uuid1; int ret; ret = CloneOrRecover("Clone", testUser1_, "/UnExistFile", "/ItUser1/ImageNotLazyClone1", false, &uuid1); ASSERT_EQ(kErrCodeFileNotExist, ret); - // 操作2:testUser1_ clone 镜像testFile1_,fileName=ImageNotLazyClone1 - // 预期2 返回克隆成功 + // Step2: testUser1_ Clone image testFile1_, fileName=ImageNotLazyClone1 + // Expected 2 to return successful cloning std::string dstFile = "/ItUser1/ImageNotLazyClone1"; ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, false, &uuid1); @@ -902,19 +902,19 @@ TEST_F(SnapshotCloneServerTest, TestImageNotLazyClone) { bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); ASSERT_TRUE(success1); - // 操作3: testUser1_ 
Clone image testFile1_, + // fileName=ImageNotLazyClone1 (duplicate clone) + // Expected 3: Returns successful cloning (idempotent) ret = CloneOrRecover("Clone", testUser1_, testFile1_, "/ItUser1/ImageNotLazyClone1", false, &uuid1); ASSERT_EQ(0, ret); - // 验证数据正确性 + // Verify data correctness std::string fakeData(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 场景九:快照存在失败场景 +// Scenario 9: A snapshot exists in a failed state TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) { std::string snapId = "errorSnapUuid"; SnapshotInfo snapInfo(snapId, testUser1_, testFile4_, "snapxxx", 0, 0, 0, 0, @@ -927,114 +927,114 @@ TEST_F(SnapshotCloneServerTest, TestSnapAndCloneWhenSnapHasError) { ASSERT_GT(pid, 0); std::string uuid1, uuid2; - // 操作1: lazy clone 快照snap1 - // 预期1:返回快照存在异常 + // Step1: Lazy clone snapshot snap1 + // Expected 1: Return that the snapshot is abnormal int ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser2/SnapLazyClone1", true, &uuid2); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - // 操作2:非lazy clone 快照snap1 - // 预期2:返回快照存在异常 + // Step2: Non lazy clone snapshot snap1 + // Expected 2: Return that the snapshot is abnormal ret = CloneOrRecover("Clone", testUser1_, snapId, "/ItUser2/SnapNotLazyClone1", false, &uuid2); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - // 操作3:lazy 从 快照snap1 recover - // 预期3:返回快照存在异常 + // Step3: Lazy recover from snapshot snap1 + // Expected 3: Return that the snapshot is abnormal ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, true, &uuid2); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - // 操作4:非lazy 从 快照snap1 recover - // 预期4:返回快照存在异常 + // Step4: Non lazy recover from snapshot snap1 + // Expected 4: Return that the snapshot is abnormal ret = CloneOrRecover("Recover", testUser1_, snapId, testFile4_, false, &uuid2); ASSERT_EQ(kErrCodeInvalidSnapshot, ret); - // 操作5:用户testUser1_对testFile4_打快照snap1 - // 预期5:清理失败快照,并打快照成功 + // Step5: User testUser1_ takes a snapshot snap1 of testFile4_ + // Expected 5: Clean up the failed snapshot and take the snapshot successfully ret = MakeSnapshot(testUser1_, testFile4_, "snap1", &uuid1); ASSERT_EQ(0, ret); - // 校验快照成功 + // Verify that the snapshot succeeded bool success1 = CheckSnapshotSuccess(testUser1_, testFile4_, uuid1); ASSERT_TRUE(success1); - // 校验清理失败快照成功 + // Verify that the failed snapshot was cleaned up successfully FileSnapshotInfo info1; int retCode = GetSnapshotInfo(testUser1_, testFile4_, snapId, &info1); ASSERT_EQ(kErrCodeFileNotExist, retCode); } -// [线上问题修复]克隆失败,回滚删除克隆卷,再次创建同样的uuid的卷的场景 +// [Online issue fix] Scenario: clone fails, rollback deletes the clone volume, and a volume with the same uuid is created again TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) { std::string uuid1, uuid2, uuid3, uuid4, uuid5, uuid6, uuid7; - // 操作1:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDestUUID - // 预期1 返回克隆成功 + // Step1: testUser1_ Clone image testFile1_, fileName=CloneHasSameDestUUID + // Expected 1 to return successful cloning std::string dstFile = "/ItUser1/CloneHasSameDest"; int ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid1); ASSERT_EQ(0, ret); - // 删除克隆卷 + // Delete Clone Volume UserInfo_t userinfo; userinfo.owner = testUser1_; int ret2 = fileClient_->Unlink(dstFile, userinfo, false); ASSERT_EQ(0, ret2); - // 操作2:testUser1_ 再次clone 镜像testFile1_, + // Step2: testUser1_ Clone image testFile1_ again, // fileName=CloneHasSameDestUUID - // 预期2 返回克隆成功 + // Expected 2 to return successful cloning ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, 
&uuid2); ASSERT_EQ(0, ret); - // 验证数据正确性 + // Verify data correctness std::string fakeData(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - // 操作3:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest2 - // 预期3 返回克隆成功 + // Step3: testUser1_ Clone image testFile1_, fileName=CloneHasSameDest2 + // Expected 3 to return successful cloning dstFile = "/ItUser1/CloneHasSameDest2"; ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid3); ASSERT_EQ(0, ret); - // 删除克隆卷 + // Delete Clone Volume UserInfo_t userinfo2; userinfo2.owner = testUser1_; ret2 = fileClient_->Unlink(dstFile, userinfo2, false); ASSERT_EQ(0, ret2); - // 操作4:testUser1_ 再次clone 镜像testFile2_, + // Step4: testUser1_ Clone the image testFile2_ again, // fileName=CloneHasSameDest2 - // 预期4 返回克隆成功 + // Expected 4 to return successful cloning ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid4); ASSERT_EQ(0, ret); - // 验证数据正确性 + // Verify data correctness ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - // 验证再次克隆lazyflag不同的情况 - // 操作5:testUser1_ clone 镜像testFile1_,fileName=CloneHasSameDest3 - // 预期5 返回克隆成功 + // Verify different situations when cloning lazyflag again + // Step5: testUser1_ Clone image testFile1_, fileName=CloneHasSameDest3 + // Expected 5 to return successful cloning dstFile = "/ItUser1/CloneHasSameDest3"; ret = CloneOrRecover("Clone", testUser1_, testFile1_, dstFile, true, &uuid5); ASSERT_EQ(0, ret); - // 删除克隆卷 + // Delete Clone Volume UserInfo_t userinfo3; userinfo2.owner = testUser1_; ret2 = fileClient_->Unlink(dstFile, userinfo2, false); ASSERT_EQ(0, ret2); - // 操作6:testUser1_ 再次非lazy clone 镜像testFile2_, + // Step6: testUser1_ Non lazy clone image testFile2_ again, // fileName=CloneHasSameDest3 - // 预期6 返回克隆成功 + // Expected 6 to return successful cloning ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, false, &uuid6); ASSERT_EQ(0, ret); @@ -1042,30 +1042,30 @@ TEST_F(SnapshotCloneServerTest, TestCloneHasSameDest) { bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid6, true); ASSERT_TRUE(success1); - // 验证数据正确性 + // Verify data correctness ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - // 删除克隆卷 + // Delete Clone Volume UserInfo_t userinfo4; userinfo2.owner = testUser1_; ret2 = fileClient_->Unlink(dstFile, userinfo2, false); ASSERT_EQ(0, ret2); - // 操作7:testUser1_ 再次非lazy clone 镜像testFile2_, + // Step7: testUser1_ Non lazy clone image testFile2_ again, // fileName=CloneHasSameDest3 - // 预期7 返回克隆成功 + // Expected 7 to return successful cloning ret = CloneOrRecover("Clone", testUser1_, testFile2_, dstFile, true, &uuid7); ASSERT_EQ(0, ret); - // 验证数据正确性 + // Verify data correctness ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// lazy克隆卷,删除克隆卷,再删除源卷,源卷需要可以删除 +// Lazy clone volume, delete clone volume, and then delete source volume. 
The source volume must still be deletable TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) { - // 操作1:testUser1_ clone 镜像testFile5_,lazy克隆两个卷dstFile1,dstFile2 - // 预期1 返回克隆成功 + // Step1: testUser1_ Clone image testFile5_, lazy clone two volumes dstFile1 and dstFile2 + // Expected 1 to return successful cloning std::string uuid1; std::string uuid2; std::string dstFile1 = "/dest1"; @@ -1080,13 +1080,13 @@ TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) { CloneOrRecover("Clone", testUser1_, testFile5_, dstFile2, true, &uuid2); ASSERT_EQ(0, ret); - // 删除源卷,删除失败,卷被占用 + // Delete the source volume: deletion fails because the volume is in use ret = fileClient_->Unlink(testFile5_, userinfo, false); ASSERT_EQ(-27, ret); - // 操作2:删除目的卷dstFile1成功,再次删除源卷 - // 预期2 删除失败,卷被占用 + // Step2: delete the destination volume dstFile1 (succeeds), then delete the source volume again + // Expected 2: deletion fails because the volume is in use ret = fileClient_->Unlink(dstFile1, userinfo, false); ASSERT_EQ(0, ret); @@ -1094,15 +1094,15 @@ TEST_F(SnapshotCloneServerTest, TestDeleteLazyCloneDestThenDeleteSrc) { ASSERT_EQ(-27, ret); - // 操作3:删除目的卷dstFile2成功,再次删除源卷 - // 预期3 删除成功 + // Step3: delete the destination volume dstFile2 (succeeds), then delete the source volume again + // Expected 3: deletion succeeds ret = fileClient_->Unlink(dstFile2, userinfo, false); ASSERT_EQ(0, ret); ret = fileClient_->Unlink(testFile5_, userinfo, false); ASSERT_EQ(0, ret); - // 操作4: 等待一段时间,看垃圾记录后台能否删除 + // Step4: wait a while to see whether the garbage record is deleted in the background bool noRecord = false; for (int i = 0; i < 100; i++) { TaskCloneInfo info; diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp index 55484d7ec3..2e893d5c78 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_concurrent_test.cpp @@ -195,7 +195,7 @@ const std::vector snapshotcloneserverConfigOptions{ std::string("server.clonePoolThreadNum=8"), std::string("server.createCloneChunkConcurrency=2"), std::string("server.recoverChunkConcurrency=2"), - // 最大快照数修改为3,以测试快照达到上限的用例 + // Set the maximum number of snapshots to 3 to test the case where the snapshot limit is reached std::string("server.maxSnapshotLimit=3"), std::string("client.methodRetryTimeSec=1"), std::string("server.clientAsyncMethodRetryTimeSec=1"), @@ -221,7 +221,7 @@ const std::vector clientConfigOptions{ const char* testFile1_ = "/concurrentItUser1/file1"; const char* testFile2_ = - "/concurrentItUser1/file2"; // 将在TestImage2Clone2Success中删除 //NOLINT + "/concurrentItUser1/file2"; // Will be deleted in TestImage2Clone2Success //NOLINT const char* testFile3_ = "/concurrentItUser2/file3"; const char* testFile4_ = "/concurrentItUser1/file3"; const char* testUser1_ = "concurrentItUser1"; @@ -239,13 +239,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + //Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + //Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ "--name=" + kTestPrefix }); @@ -256,13 +256,13 @@
class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + //Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + //Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -299,7 +299,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + //Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -318,7 +318,7 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + //Create a logical pool and sleep for a period of time to let the underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -386,7 +386,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << testfd1_; return false; } - // 每个chunk写前面4k数据, 写两个segment + //Write the first 4k data and two segments for each chunk uint64_t totalChunk = 2ULL * segmentSize / chunkSize; for (uint64_t i = 0; i < totalChunk / chunkGap; i++) { ret = @@ -486,9 +486,9 @@ class SnapshotCloneServerTest : public ::testing::Test { CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; -// 并发测试用例 +//Concurrent test cases -// 这个用例测试快照层数,放在最前面 +//This use case tests the number of snapshot layers, placed at the top TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) { std::string uuid1, uuid2, uuid3; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); @@ -506,7 +506,7 @@ TEST_F(SnapshotCloneServerTest, TestSameFile3Snapshot) { bool success3 = CheckSnapshotSuccess(testUser1_, testFile1_, uuid3); ASSERT_TRUE(success3); - // 快照层数设置为3,尝试再打一次快照,超过层数失败 + //Set the number of snapshot layers to 3. 
Attempt to take another snapshot, exceeding the number of layers failed ret = MakeSnapshot(testUser1_, testFile1_, "snap3", &uuid3); ASSERT_EQ(kErrCodeSnapshotCountReachLimit, ret); @@ -585,7 +585,7 @@ TEST_F(SnapshotCloneServerTest, TestSnapSameClone1Success) { ret1 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid1); ASSERT_EQ(0, ret1); - // 幂等 + //Idempotent ret2 = CloneOrRecover("Clone", testUser1_, snapId, dstFile, true, &uuid2); ASSERT_EQ(0, ret2); @@ -732,7 +732,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneSnap) { ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData)); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - // 判断是否clone成功 + //Determine if the clone was successful bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); ASSERT_TRUE(success1); } @@ -747,7 +747,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) { ASSERT_TRUE(WaitMetaInstalledSuccess(testUser1_, uuid1, true)); - // clone完成stage1之后即可对外提供服务,测试克隆卷是否能正常读取数据 + //After the clone completes stage1, it can provide external services and test whether the cloned volume can read data normally std::string fakeData1(4096, 'x'); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData1)); @@ -759,7 +759,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyCloneImage) { ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData2)); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData2)); - // 判断是否clone成功 + //Determine if the clone was successful bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, true); ASSERT_TRUE(success1); } @@ -782,7 +782,7 @@ TEST_F(SnapshotCloneServerTest, TestReadWriteWhenLazyRecoverSnap) { ASSERT_TRUE(WriteFile(dstFile, testUser1_, fakeData)); ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); - // 判断是否clone成功 + //Determine if the clone was successful bool success1 = CheckCloneOrRecoverSuccess(testUser1_, uuid1, false); ASSERT_TRUE(success1); } diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp index 326ebe66c0..334e1e85de 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_exception_test.cpp @@ -104,7 +104,7 @@ class SnapshotCloneServerTest : public ::testing::Test { const std::string &file, const std::string &uuid, SnapshotInfo *snapInfo) { - // 验证任务失败 + // Verification task failed FileSnapshotInfo info1; int ret = GetSnapshotInfo( user, file, uuid, &info1); @@ -126,7 +126,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } - // 验证任务不存在 + // Verification task does not exist SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); if (ret != -1) { @@ -148,7 +148,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } int seqNum = snapInfo.GetSeqNum(); - // 验证curve上无快照 + // Verify that there are no snapshots on the curve FInfo fInfo; int ret = server_->GetCurveFsClient()->GetSnapshot( file, user, seqNum, &fInfo); @@ -158,7 +158,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } - // 验证nos上无快照 + // Verify that there are no snapshots on NOS ChunkIndexDataName indexData(file, seqNum); if (server_->GetDataStore()->ChunkIndexDataExist(indexData)) { LOG(INFO) << "AssertEnvClean Fail, snapshot exist on nos."; @@ -170,7 +170,7 @@ class SnapshotCloneServerTest : public ::testing::Test { bool 
JudgeCloneTaskFailCleanEnvAndCheck( const std::string &user, const std::string &uuid) { - // 验证任务状态为error + // Verify that the task status is error TaskCloneInfo info1; int ret = GetCloneTaskInfo( user, uuid, &info1); @@ -191,7 +191,7 @@ class SnapshotCloneServerTest : public ::testing::Test { bool JudgeCloneTaskNotExistCleanEnvAndCheck( const std::string &user, const std::string &uuid) { - // 验证任务不存在 + // Verification task does not exist TaskCloneInfo info1; int ret = GetCloneTaskInfo( user, uuid, &info1); @@ -201,7 +201,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } - // 验证curvefs上无临时文件 + // Verify that there are no temporary files on curvefs if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) { LOG(INFO) << "AssertEnvClean fail" << ", ret = " << ret; @@ -222,7 +222,7 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::milliseconds(3000)); - // 验证任务不存在 + // Verification task does not exist TaskCloneInfo info; ret = GetCloneTaskInfo(user, uuid, &info); if (kErrCodeFileNotExist != ret) { @@ -231,7 +231,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return false; } - // 验证curvefs上无临时文件 + // Verify that there are no temporary files on curvefs if (server_->GetCurveFsClient()->JudgeCloneDirHasFile()) { LOG(INFO) << "AssertEnvClean fail" << ", ret = " << ret; @@ -419,13 +419,13 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnAddSnapshot) { fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot", // NOLINT 1, NULL, 0); - // 验证任务失败 + // Verification task failed int ret = MakeSnapshot(user, file , "snap8", &uuid); ASSERT_EQ(-1, ret); fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.AddSnapshot"); // NOLINT - // 验证任务不存在 + // Verification task does not exist SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(-1, ret); @@ -446,7 +446,7 @@ TEST_F(SnapshotCloneServerTest, TestCreateSnapshotFailOnUpdateSnapshot) { fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.UpdateSnapshot"); // NOLINT - // 验证任务失败 + // Verification task failed FileSnapshotInfo info1; ret = GetSnapshotInfo( user, file, uuid, &info1); @@ -506,13 +506,13 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnGetChunkIndexData) { fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.GetChunkIndexData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -529,13 +529,13 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteChunkData) { fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -552,13 +552,13 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteChunkIndexData) { 
fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotDataStore.DeleteChunkIndexData"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -575,13 +575,13 @@ TEST_F(SnapshotCloneServerTest, TestDeleteSnapshotFailOnDeleteSnapshot) { fiu_enable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot", // NOLINT 1, NULL, 0); - // 验证删除失败 + // Verification deletion failed int ret = DeleteAndCheckSnapshotSuccess(user, file, uuid); ASSERT_EQ(-1, ret); fiu_disable("test/integration/snapshotcloneserver/FakeSnapshotCloneMetaStore.DeleteSnapshot"); // NOLINT - // 验证任务失败 + // Verification task failed SnapshotInfo sinfo; ret = server_->GetMetaStore()->GetSnapshotInfo(uuid, &sinfo); ASSERT_EQ(0, ret); @@ -783,7 +783,7 @@ TEST_F(SnapshotCloneServerTest, ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); @@ -1080,7 +1080,7 @@ TEST_F(SnapshotCloneServerTest, ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 克隆未完成前删除目标文件 + // Delete target file before cloning is completed ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile("/user1/clone1", "", 0)); @@ -1490,7 +1490,7 @@ TEST_F(SnapshotCloneServerTest, ret = Flatten(testUser1, uuid1); ASSERT_EQ(0, ret); - // 恢复未完成前删除目标文件 + // Delete target files before recovery is complete ASSERT_EQ(LIBCURVE_ERROR::OK, server_->GetCurveFsClient()->DeleteFile(testFile1, "", 0)); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp index 2e549688b8..dffa096e93 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_module.cpp @@ -108,7 +108,7 @@ int SnapshotCloneServerModule::Start( return kErrCodeServerInitFail; } - // 先启动clone服务再启动snapshot服务,因为删除快照依赖是否有clone引用 + // Start the clone service first and then the snapshot service, because there is a clone reference when deleting snapshot dependencies int ret = cloneServiceManager_->Start(); if (ret < 0) { LOG(ERROR) << "cloneServiceManager start fail" diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index 18a113ef0f..be2d4914be 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -49,10 +49,10 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkSplitSize = 8388608; -// 测试文件只写2个segment +//Write only 2 segments in the test file const uint64_t testFile1AllocSegmentNum = 2; -// 一些常数定义 +//Some constant definitions const char *cloneTempDir_ = "/clone"; const char *mdsRootUser_ = "root"; const char *mdsRootPassword_ = "root_password"; @@ -261,13 +261,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + //Initialize db 
system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + //Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ "--name=" + kTestPrefix }); @@ -280,13 +280,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + //Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -323,7 +323,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -342,7 +342,7 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool and sleep for a period of time to let the underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -403,7 +403,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << *fdOut; return false; } - // 2个segment,每个写第一个chunk + //2 segments, each with the first chunk written for (uint64_t i = 0; i < testFile1AllocSegmentNum; i++) { ret = fileClient_->Write(*fdOut, dataSample.c_str(), i * segmentSize, dataSample.size()); @@ -427,7 +427,7 @@ class SnapshotCloneServerTest : public ::testing::Test { userinfo.owner = user; int ret = 0; - // 检查文件状态 + // Check file status FInfo fileInfo; ret = snapClient_->GetFileInfo(fileName, userinfo, &fileInfo); if (ret < 0) { @@ -512,9 +512,9 @@ class SnapshotCloneServerTest : public ::testing::Test { bool IsRecover = false) { uint64_t seqNum = 1; if (IsRecover) { - seqNum = 2; // 恢复新文件使用版本号+1 + seqNum = 2; // Restore new files using version number+1 } else { - seqNum = 1; // 克隆新文件使用初始版本号1 + seqNum = 1; // Clone new files using initial version number 1 } int ret = snapClient_->CreateCloneFile( testFile1_, fileName, @@ -554,11 +554,11 @@ class SnapshotCloneServerTest : public ::testing::Test { name.chunkIndex_ = i * segmentSize / chunkSize; std::string location = LocationOperator::GenerateS3Location(name.ToDataChunkKey()); - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Create第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Create全部的chunk。 + //Due to the fact that each segment in the test file only writes the first chunk, + //Snapshots can only dump the currently written chunks, + //So cloning each segment from the snapshot only creates the first chunk. + //And when cloning from a file, because mds doesn't know if chunk has been written or not, + //So we need to create all chunks. 
ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; SnapCloneCommonClosure *cb = new SnapCloneCommonClosure(tracker); @@ -570,8 +570,8 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", seqNum = " << 1 << ", csn = " << 2; int ret = snapClient_->CreateCloneChunk( location, cidInfo, - 1, // 恢复使用快照中chunk的版本号 - 2, // 恢复使用新文件的版本号, 即原文件版本号+1 + 1, // Recover uses the chunk's version number from the snapshot + 2, // Recover uses the new file's version number, i.e. the original file version number + 1 chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -594,8 +594,8 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", seqNum = " << 1 << ", csn = " << 0; int ret = snapClient_->CreateCloneChunk(location, cidInfo, - 1, // 克隆使用初始版本号1 - 0, // 克隆使用0 + 1, // Clone using initial version number 1 + 0, // Clone using 0 chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -629,11 +629,11 @@ class SnapshotCloneServerTest : public ::testing::Test { auto tracker = std::make_shared(); if (IsSnapshot) { for (int i = 0; i < testFile1AllocSegmentNum; i++) { - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Recover第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Recover全部的chunk。 + //Due to the fact that each segment in the test file only writes the first chunk, + //Snapshots can only dump the currently written chunks, + //So clone each segment from the snapshot and only recover the first chunk. + //And when cloning from a file, because mds doesn't know if chunk has been written or not, + //So we need to recover all chunks. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { SnapCloneCommonClosure *cb = @@ -715,7 +715,7 @@ CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; SnapshotClient *SnapshotCloneServerTest::snapClient_ = nullptr; -// 未在curve中创建快照阶段,重启恢复 +//The snapshot has not yet been created in curve; restart and recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { std::string uuid1 = UUIDGenerator().GenerateUUID(); SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, @@ -739,10 +739,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,但成功结果未返回,重启恢复 +//A snapshot has been created in the curve, but the successful result has not been returned. Restart for recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsNotReturn) { - // 调用client接口创建快照 + // Call the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); @@ -768,10 +768,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,结果已返回,重启恢复 +//A snapshot has been created in the curve, and the results have been returned. Restart to recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsReturn) { - // 调用client接口创建快照 + // Call the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); @@ -796,7 +796,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照阶段,nos上传部分快照,重启恢复 +// The snapshot has been created in curve;
NOS uploads some snapshots and restarts for recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); @@ -811,7 +811,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { } if (info1.GetSnapshotInfo().GetStatus() == Status::pending) { if (info1.GetSnapProgress() > kProgressTransferSnapshotDataStart) { - // 当进度到达转储的百分比时重启 + // Restart when the progress reaches the percentage of the dump pid_t pid = cluster_->RestartSnapshotCloneServer(1, true); LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; ASSERT_GT(pid, 0); @@ -835,7 +835,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上未创建文件 +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneFile"; @@ -859,7 +859,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasCreateCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -889,7 +889,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -917,7 +917,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -951,7 +951,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -983,7 +983,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneChunkSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1019,7 +1019,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { 
std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1053,7 +1053,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1091,7 +1091,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1127,7 +1127,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart the RecoverChunk phase and partially call RecoverChunk on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1166,7 +1166,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1204,7 +1204,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1246,7 +1246,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1286,7 +1286,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1329,7 +1329,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); 
std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1371,7 +1371,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneRenameCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1419,8 +1419,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 以下为Lazy模式用例 -// CreateCloneFile阶段重启,mds上未创建文件 +// The following are the Lazy pattern use cases +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1450,7 +1450,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasCreateCloneFileSuccessNotReturn) { std::string snapId; @@ -1486,7 +1486,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1521,7 +1521,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneMetaSuccessNotReturn) { std::string snapId; @@ -1561,7 +1561,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1600,7 +1600,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneChunkSuccessNotReturn) { std::string snapId; @@ -1642,7 +1642,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1683,7 +1683,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta on mds was 
successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneMetaSuccessNotReturn) { std::string snapId; @@ -1727,7 +1727,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1770,7 +1770,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyChangeOwnerSuccessNotReturn) { std::string snapId; @@ -1816,7 +1816,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1861,7 +1861,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRenameCloneFileSuccessNotReturn) { std::string snapId; @@ -1910,7 +1910,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1953,7 +1953,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart the RecoverChunk phase and partially call RecoverChunk on the chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRecoverChunkSuccssNotReturn) { std::string snapId; @@ -1999,7 +1999,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -2044,7 +2044,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneFileSuccessNotReturn) { std::string snapId; diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp index f56bae71e7..7df117835e 100644 --- a/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_test.cpp @@ -35,7 +35,7 @@ const std::string kTestPrefix = "MainSCSTest"; // NOLINT -// 一些常数定义 +// Some constant definitions const char* cloneTempDir_ = 
"/clone"; const char* mdsRootUser_ = "root"; const char* mdsRootPassword_ = "root_password"; @@ -135,11 +135,11 @@ class SnapshotCloneServerMainTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db std::string rmcmd = "rm -rf " + std::string(kEtcdName) + ".etcd"; system(rmcmd.c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{"--name=" + std::string(kEtcdName)}); @@ -150,7 +150,7 @@ class SnapshotCloneServerMainTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; @@ -201,7 +201,7 @@ TEST_F(SnapshotCloneServerMainTest, testmain) { std::this_thread::sleep_for(std::chrono::seconds(2)); - // 测试验证是否状态为active + // Test and verify if the status is active // "curl "127.0.0.1:port/vars/snapshotcloneserver_status""; std::string cmd = "curl \"127.0.0.1:" + std::string(kSnapshotCloneServerDummyServerPort) + diff --git a/test/kvstorageclient/etcdclient_test.cpp b/test/kvstorageclient/etcdclient_test.cpp index ff92a579f3..f17ea39024 100644 --- a/test/kvstorageclient/etcdclient_test.cpp +++ b/test/kvstorageclient/etcdclient_test.cpp @@ -43,7 +43,7 @@ using ::curve::mds::NameSpaceStorageCodec; using ::curve::mds::PageFileChunkInfo; using ::curve::mds::PageFileSegment; -// 接口测试 +// Interface testing class TestEtcdClinetImp : public ::testing::Test { protected: TestEtcdClinetImp() {} @@ -63,8 +63,8 @@ class TestEtcdClinetImp : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as it may cause deadlock!!! */ ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", @@ -75,7 +75,7 @@ class TestEtcdClinetImp : public ::testing::Test { exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Try init for a certain period of time until etcd is fully recovered uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); bool initSuccess = false; while (::curve::common::TimeUtility::GetTimeofDaySec() - now <= 50) { @@ -108,8 +108,8 @@ class TestEtcdClinetImp : public ::testing::Test { TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { // 1. put file - // - file0~file9 put到etcd中 - // - file6有快照 + // - file0~file9 put into etcd + // - file6 has a snapshot std::map keyMap; std::map fileName; FileInfo fileInfo7, fileInfo8; @@ -170,7 +170,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } } - // 2. get file, 可以正确获取并解码file0~file9 + // 2. get file, which can correctly obtain and decode file0~file9 for (int i = 0; i < keyMap.size(); i++) { std::string out; int errCode = client_->Get(keyMap[i], &out); @@ -180,7 +180,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], fileinfo.filename()); } - // 3. list file, 可以list到file0~file9 + // 3. list file, which can be listed to file0~file9 std::vector listRes; std::vector> listRes2; int errCode = client_->List("01", "02", &listRes2); @@ -193,7 +193,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(fileName[i], finfo.filename()); } - // 4. delete file, 删除file0~file4,这部分文件不能再获取到 + // 4. 
Delete file, delete file0~file4, these files cannot be retrieved anymore for (int i = 0; i < keyMap.size() / 2; i++) { ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Delete(keyMap[i])); // can not get delete file @@ -201,7 +201,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[i], &out)); } - // 5. rename file: rename file9 ~ file10, file10本来不存在 + // 5. Rename file: rename file9~file10, file10 does not originally exist Operation op1{OpType::OpDelete, const_cast(keyMap[9].c_str()), const_cast(fileInfo9.c_str()), static_cast(keyMap[9].size()), @@ -279,9 +279,9 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { ops.emplace_back(op8); ops.emplace_back(op9); ASSERT_EQ(EtcdErrCode::EtcdOK, client_->TxnN(ops)); - // 不能获取 file7 + // Unable to obtain file7 ASSERT_EQ(EtcdErrCode::EtcdKeyNotExist, client_->Get(keyMap[7], &out)); - // 成功获取rename以后的file7 + // Successfully obtained file7 after renam ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Get(keyMap[8], &out)); ASSERT_TRUE(NameSpaceStorageCodec::DecodeFileInfo(out, &fileinfo)); ASSERT_EQ(newFileInfo7.filename(), fileinfo.filename()); @@ -321,7 +321,7 @@ TEST_F(TestEtcdClinetImp, test_EtcdClientInterface) { } TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { - // 准备一批数据 + // Prepare a batch of data // "011" "013" "015" "017" "019" for (int i = 1; i <= 9; i += 2) { std::string key = std::string("01") + std::to_string(i); @@ -336,13 +336,13 @@ TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { ASSERT_EQ(EtcdErrCode::EtcdOK, client_->Put(key, value)); } - // 获取当前revision - // 通过GetCurrentRevision获取 + // Obtain the current revision + // Obtained through GetCurrentRevision int64_t curRevision; ASSERT_EQ(EtcdErrCode::EtcdOK, client_->GetCurrentRevision(&curRevision)); LOG(INFO) << "get current revision: " << curRevision; - // 根据当前revision获取前5个key-value + // Obtain the top 5 key values based on the current revision std::vector out; std::string lastKey; int res = client_->ListWithLimitAndRevision("01", "", 5, curRevision, &out, @@ -355,7 +355,7 @@ TEST_F(TestEtcdClinetImp, test_ListWithLimitAndRevision) { ASSERT_EQ(value, out[i - 1]); } - // 根据当前revision获取后5个key-value + // Obtain the last 5 key values based on the current revision out.clear(); res = client_->ListWithLimitAndRevision(lastKey, "", 5, curRevision, &out, &lastKey); @@ -395,37 +395,37 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { uint64_t leaderOid; { - // 1. leader1竞选成功,client退出后leader2竞选成功 + // 1. 
leader1 successfully ran, but leader2 successfully ran after client exited LOG(INFO) << "test case1 start..."; - // 启动一个线程竞选leader + // Start a thread to run for the leader int electionTimeoutMs = 0; uint64_t targetOid; common::Thread thread1(&EtcdClientImp::CampaignLeader, client_, pfx, leaderName1, sessionnInterSec, electionTimeoutMs, &targetOid); - // 等待线程1执行完成, 线程1执行完成就说明竞选成功, - // 否则electionTimeoutMs为0的情况下会一直hung在里面 + // Waiting for thread 1 to complete execution indicates a successful election, + // Otherwise, if electionTimeoutMs is 0, they will remain in it all the time thread1.join(); LOG(INFO) << "thread 1 exit."; client_->CloseClient(); - // 启动第二个线程竞选leader + // Start the second thread to run for the leader auto client2 = std::make_shared(); ASSERT_EQ(0, client2->Init(conf, dialtTimeout, retryTimes)); common::Thread thread2(&EtcdClientImp::CampaignLeader, client2, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); - // 线程1退出后,leader2会当选 + // After thread1 exits, leader2 will be elected thread2.join(); LOG(INFO) << "thread 2 exit."; - // leader2为leader的情况下此时观察leader1的key应该发现session过期 + // If leader2 is the leader, observing the key of leader1 at this time should reveal that the session has expired ASSERT_EQ(EtcdErrCode::EtcdObserverLeaderInternal, client2->LeaderObserve(targetOid, leaderName1)); client2->CloseClient(); } { - // 2. leader1竞选成功后,不退出; leader2竞选超时 + // 2. After the successful election of leader1, do not withdraw; leader2 campaign timeout LOG(INFO) << "test case2 start..."; int electionTimeoutMs = 1000; auto client1 = std::make_shared(); @@ -436,7 +436,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader2再次竞选 + // leader2 is running again common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); @@ -446,8 +446,8 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { } { - // 3. leader1竞选成功后,删除key; leader2竞选成功; observe leader1改变; - // observer leader2的过程中etcd挂掉 + // 3. 
After the successful election of leader1, delete the key; The leader2 campaign was successful; Observe leader1 changed; + // During the process of observer leader2, etcd crashes LOG(INFO) << "test case3 start..."; uint64_t targetOid; int electionTimeoutMs = 0; @@ -458,17 +458,17 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { &targetOid); thread1.join(); LOG(INFO) << "thread 1 exit."; - // leader1卸任leader + // leader1 Resignation Leader ASSERT_EQ(EtcdErrCode::EtcdLeaderResiginSuccess, client1->LeaderResign(targetOid, 1000)); - // leader2当选 + // leader2 elected common::Thread thread2(&EtcdClientImp::CampaignLeader, client1, pfx, leaderName2, sessionnInterSec, electionTimeoutMs, &leaderOid); thread2.join(); - // leader2启动线程observe + // leader2 starts thread observe common::Thread thread3(&EtcdClientImp::LeaderObserve, client1, targetOid, leaderName2); std::this_thread::sleep_for(std::chrono::seconds(1)); @@ -477,7 +477,7 @@ TEST_F(TestEtcdClinetImp, test_CampaignLeader) { client1->CloseClient(); LOG(INFO) << "thread 2 exit."; - // 使得etcd完全停掉 + // Make the ETCD completely stop std::this_thread::sleep_for(std::chrono::seconds(2)); } } @@ -495,7 +495,7 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { chunkinfo->set_copysetid(i + 1); } - // 放入segment,前三个属于文件1,后四个属于文件2 + // Place the segment, with the first three belonging to file1 and the last four belonging to file2 uint64_t id1 = 101; uint64_t id2 = 100001; for (uint32_t i = 0; i < 7; ++i) { @@ -514,7 +514,7 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { LOG(INFO) << segment.startoffset(); } - // 获取文件1的segment + // Obtain the segment of file1 std::string startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1, 0); std::string endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id1 + 1, 0); @@ -527,7 +527,7 @@ TEST_F(TestEtcdClinetImp, test_ListSegment) { ASSERT_EQ(i * 1024, segment2.startoffset()); } - // 获取文件2的segment + // Obtain the segment of file2 startKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2, 0); endKey = NameSpaceStorageCodec::EncodeSegmentStoreKey(id2 + 1, 0); out.clear(); diff --git a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp index 7fa055321b..34a7b6a21e 100644 --- a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp +++ b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp @@ -53,7 +53,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { HeartbeatInfo info; { - // chunkserver首次更新heartbeatInfo + // Chunkserver updates heartbeatInfo for the first time checker->UpdateLastReceivedHeartbeatTime(1, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( 2, steady_clock::now() - std::chrono::milliseconds(4000)); @@ -94,16 +94,16 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver-1 更新为online - // chunkserver-2 心跳miss,保持unstable - // chunkserver-3,chunkserver-5,chunkserver-6心跳offline, - // chunkserver-3的retired状态会被更新, 从心跳map中移除 - // chunkserver-5已经是retired状态,无需更新 - // chunkserver-6 get info失败, 未成功更新状态 - // chunnkserver-7 update失败, 未成功更新状态 - // chunkserver-8, pendding && online, 更新为onLine - // chunkserver-9, pendding && unstable, 更新为retired - // chunkserver-10, pendding && offline, 更新为retired + // chunkserver-1 update to online + // chunkserver-2 Heartbeat Miss, Keep Unstable + // chunkserver-3, chunkserver-5, chunkserver-6 heartbeat offline, + // The retried status of chunkserver-3 will be updated and removed from the heartbeat map + // chunkserver-5 is already in a retired 
state and does not need to be updated + // chunkserver-6 get info failed, status not successfully updated + // chunkserver-7 update failed, status not successfully updated + // chunkserver-8, pendding && online, updated to onLine + // chunkserver-9, pendding && unstable, updated to retired + // chunkserver-10, pendding && offline, updated to retired EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _)) .Times(7).WillRepeatedly(Return(kTopoErrCodeSuccess)); ChunkServer cs2(2, "", "", 1, "", 0, "", @@ -164,7 +164,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) { } { - // chunkserver 2, 6 ,7 收到心跳 + // chunkserver 2, 6, 7 Heartbeat received checker->UpdateLastReceivedHeartbeatTime( 2, steady_clock::now()); checker->UpdateLastReceivedHeartbeatTime( diff --git a/test/mds/heartbeat/heartbeat_manager_test.cpp b/test/mds/heartbeat/heartbeat_manager_test.cpp index 54c4397287..86699e7feb 100644 --- a/test/mds/heartbeat/heartbeat_manager_test.cpp +++ b/test/mds/heartbeat/heartbeat_manager_test.cpp @@ -124,7 +124,7 @@ TEST_F(TestHeartbeatManager, test_checkReuqest_abnormal) { ASSERT_EQ(0, response.needupdatecopysets_size()); // 7. startTime not initialized - // TODO(lixiaocui): 后续考虑心跳加上错误码 + // TODO(lixiaocui): Consider adding an error code to the heartbeat in the future ::curve::mds::topology::ChunkServer normalCs( 1, "hello", "", 1, "192.168.10.1", 9000, ""); EXPECT_CALL(*topology_, GetChunkServer(_, _)) diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp index 11c70f8572..d3eac22039 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp @@ -44,7 +44,7 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { auto mockEtcdClient = std::make_shared(); { - // 1. list失败 + // 1. list failed EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, Matcher*>(_))) @@ -55,7 +55,7 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { } { - // 2. list成功,解析失败 + // 2. list successful, parsing failed std::vector values{"hello"}; EXPECT_CALL(*mockEtcdClient, List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND, @@ -67,7 +67,7 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) { &out, mockEtcdClient)); } { - // 3. 获取已有的segment alloc value成功 + // 3. Successfully obtained the existing segment alloc value std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient, @@ -96,7 +96,7 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { 2, mockEtcdClient, &out)); } { - // 2. ListWithLimitAndRevision成功,但是解析失败 + // 2. ListWithLimitAndRevision succeeded, but parsing failed LOG(INFO) << "start test2......"; std::vector values{"hello"}; std::string lastKey = "021"; @@ -109,7 +109,7 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { 2, mockEtcdClient, &out)); } { - // 3. ListWithLimitAndRevision成功, 解析成功, bundle=1000, 获取个数为1 + // 3. ListWithLimitAndRevision successful, parsing successful, bundle=1000, number obtained is 1 LOG(INFO) << "start test3......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); @@ -133,8 +133,8 @@ TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) { ASSERT_EQ(1 << 30, out[1]); } { - // 4. ListWithLimitAndRevision成功, 解析成功 - // bundle=1000, 获取个数为1001 + // 4. 
ListWithLimitAndRevision successful, parsing successful + // bundle=1000, get a number of 1001 LOG(INFO) << "start test4......"; PageFileSegment segment; segment.set_segmentsize(1 << 30); diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp index c51e91587c..f250e7e401 100644 --- a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp +++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp @@ -60,14 +60,14 @@ class AllocStatisticTest : public ::testing::Test { TEST_F(AllocStatisticTest, test_Init) { { - // 1. 从etcd中获取当前revision失败 + // 1. Failed to obtain the current revision from ETCD LOG(INFO) << "test1......"; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). WillOnce(Return(EtcdErrCode::EtcdCanceled)); ASSERT_EQ(-1, allocStatistic_->Init()); } { - // 2. 获取已经存在的logicalPool对应的alloc大小失败 + // 2. Failed to obtain the alloc size corresponding to the existing logicalPool LOG(INFO) << "test2......"; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)). WillOnce(Return(EtcdErrCode::EtcdOK)); @@ -80,7 +80,7 @@ TEST_F(AllocStatisticTest, test_Init) { ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); } { - // 3. init成功 + // 3. init successful LOG(INFO) << "test3......"; std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; @@ -99,8 +99,8 @@ TEST_F(AllocStatisticTest, test_Init) { } TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { - // 初始化 allocStatistic - // 旧值: logicalPooId(1):1024 + // Initialize allocStatistics + // Old value: logicalPooId(1):1024 std::vector values{ NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)}; EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)) @@ -124,19 +124,19 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { values.emplace_back(encodeSegment); } - // 1. 在定期持久化线程和统计线程启动前,只能获取旧值 + // 1 Only old values can be obtained before regular persistent threads and statistical threads are started int64_t alloc; ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024, alloc); ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc)); - // 2. 更新segment的值 + // 2 Update the value of segment allocStatistic_->DeAllocSpace(1, 64, 1); allocStatistic_->AllocSpace(1, 32, 1); ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc)); ASSERT_EQ(1024 - 32, alloc); - // 设置mock的etcd中segment的值 + // Set the value of segment in the ETCD of the mock // logicalPoolId(1):500 * (1<<30) // logicalPoolId(2):501 * (1<<30) segment.set_logicalpoolid(2); @@ -167,7 +167,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { .WillOnce(Return(EtcdErrCode::EtcdCanceled)) .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK))); - // 设置mock的Put结果 + // Set the Put result of the mock EXPECT_CALL(*mockEtcdClient_, Put( NameSpaceStorageCodec::EncodeSegmentAllocKey(1), NameSpaceStorageCodec::EncodeSegmentAllocValue( @@ -198,7 +198,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30))) .WillOnce(Return(EtcdErrCode::EtcdOK)); - // 2. 
启动定期持久化线程和统计线程 + // 2 Start regular persistence and statistics threads for (int i = 1; i <= 2; i++) { allocStatistic_->AllocSpace(i, 1L << 30, i + 3); } @@ -211,7 +211,7 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) { ASSERT_EQ(502L *(1 << 30), alloc); std::this_thread::sleep_for(std::chrono::milliseconds(30)); - // 再通过alloc进行更新 + // Update through alloc again for (int i = 1; i <= 2; i++) { allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4); } diff --git a/test/mds/nameserver2/clean_core_test.cpp b/test/mds/nameserver2/clean_core_test.cpp index 5288fd83d6..a615cc433d 100644 --- a/test/mds/nameserver2/clean_core_test.cpp +++ b/test/mds/nameserver2/clean_core_test.cpp @@ -139,8 +139,8 @@ TEST_F(CleanCoreTest, testcleansnapshotfile) { StatusCode::kSnapshotFileDeleteError); } { - // 联调Bug修复:快照文件共享源文件的segment,所以在查询segment的时候需要使用 - // ParentID 进行查找 + // Joint debugging bug fix: The snapshot file shares a segment of the source file, so it needs to be used when querying segments + // ParentID for lookup uint32_t segmentNum = kMiniFileLength / DefaultSegmentSize; uint64_t expectParentID = 101; for (uint32_t i = 0; i < segmentNum; i++) { diff --git a/test/mds/nameserver2/curvefs_test.cpp b/test/mds/nameserver2/curvefs_test.cpp index 899b942ee8..570623f60a 100644 --- a/test/mds/nameserver2/curvefs_test.cpp +++ b/test/mds/nameserver2/curvefs_test.cpp @@ -68,7 +68,7 @@ class CurveFSTest: public ::testing::Test { mockcleanManager_ = std::make_shared(); topology_ = std::make_shared(); snapshotClient_ = std::make_shared(); - // session repo已经mock,数据库相关参数不需要 + // The session repo has been mocked, and database related parameters are not required fileRecordManager_ = std::make_shared(); fileRecordOptions_.fileRecordExpiredTimeUs = 5 * 1000; fileRecordOptions_.scanIntervalTimeUs = 1 * 1000; @@ -1645,18 +1645,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo3.set_id(11); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find /file1 .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find /trash/file2 .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo3), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check if /trash/file2 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo3), @@ -1684,13 +1684,13 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(4) - // 查找/file1 + // Find /file1 .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find /trash/file2 .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), @@ -1708,18 +1708,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find /file1 .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find 
/trash/file2 .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check if /trash/file2 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), @@ -1745,18 +1745,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find /file1 .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find /trash/file2 .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check if /trash/file2 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), @@ -1783,18 +1783,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find/file1 .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find/trash/file2 .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check/trash/file2 if there is a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), @@ -1821,18 +1821,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find/file1 .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find/trash/file2 .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check/trash/file2 if there is a snapshot .WillOnce(DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce(DoAll(SetArgPointee<2>(fileInfo1), @@ -1859,18 +1859,18 @@ TEST_F(CurveFSTest, testRenameFile) { fileInfo2.set_filetype(FileType::INODE_DIRECTORY); EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(6) - // 查找/file1 + // Find/file1 .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /file1是否有快照 + // Check if /file1 has a snapshot .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // 查找/trash/file2 + // Find/trash/file2 .WillOnce( DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce( DoAll(SetArgPointee<2>(fileInfo1), Return(StoreStatus::OK))) - // check /trash/file2是否有快照 + // Check/trash/file2 if there is a snapshot .WillOnce( DoAll(SetArgPointee<2>(fileInfo2), Return(StoreStatus::OK))) .WillOnce( @@ -3643,7 +3643,7 @@ TEST_F(CurveFSTest, CheckSnapShotFileStatus) { } TEST_F(CurveFSTest, testOpenFile) { - // 文件不存在 + // File does not exist { ProtoSession 
protoSession; FileInfo fileInfo; @@ -3657,7 +3657,7 @@ TEST_F(CurveFSTest, testOpenFile) { ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // open目录 + // Open directory { ProtoSession protoSession; FileInfo fileInfo; @@ -3671,7 +3671,7 @@ TEST_F(CurveFSTest, testOpenFile) { ASSERT_EQ(curvefs_->GetOpenFileNum(), 0); } - // 执行成功 + // Execution successful { ProtoSession protoSession; FileInfo fileInfo; @@ -3857,7 +3857,7 @@ TEST_F(CurveFSTest, testCloseFile) { FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) .WillOnce(Return(StoreStatus::OK)); @@ -3866,7 +3866,7 @@ TEST_F(CurveFSTest, testCloseFile) { curvefs_->OpenFile("/file1", "127.0.0.1", &protoSession, &fileInfo), StatusCode::kOK); - // 执行成功 + // Execution successful { EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) @@ -3883,7 +3883,7 @@ TEST_F(CurveFSTest, testRefreshSession) { FileInfo fileInfo; fileInfo.set_filetype(FileType::INODE_PAGEFILE); - // 先插入session + // Insert session first EXPECT_CALL(*storage_, GetFile(_, _, _)) .Times(1) .WillOnce(Return(StoreStatus::OK)); @@ -3892,7 +3892,7 @@ TEST_F(CurveFSTest, testRefreshSession) { &protoSession, &fileInfo), StatusCode::kOK); - // 文件不存在 + // File does not exist { FileInfo fileInfo1; EXPECT_CALL(*storage_, GetFile(_, _, _)) @@ -3903,7 +3903,7 @@ TEST_F(CurveFSTest, testRefreshSession) { StatusCode::kFileNotExists); } - // 执行成功 + //Execution successful { FileInfo fileInfo1; EXPECT_CALL(*storage_, GetFile(_, _, _)) @@ -3921,7 +3921,7 @@ TEST_F(CurveFSTest, testRefreshSession) { TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配,date超时 + // Root user, signature matching, date timeout { std::string filename = "/file1"; std::string str2sig = Authenticator::GetString2Signature(date, @@ -3937,14 +3937,14 @@ TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) { StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { ASSERT_EQ(curvefs_->CheckDestinationOwner("/file1", authOptions_.rootOwner, "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证失败 + // Ordinary user, non root user authentication failed for files in the root directory { FileInfo fileInfo; fileInfo.set_owner(authOptions_.rootOwner); @@ -3961,7 +3961,7 @@ TEST_F(CurveFSTest, testCheckRenameNewfilePathOwner) { TEST_F(CurveFSTest, testCheckPathOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配, 并检测date过期 + // Root user, signature matching, and detecting date expiration { std::string filename = "/file1"; std::string str2sig = Authenticator::GetString2Signature(date, @@ -3978,14 +3978,14 @@ TEST_F(CurveFSTest, testCheckPathOwner) { StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { ASSERT_EQ(curvefs_->CheckPathOwner("/file1", authOptions_.rootOwner, "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证成功, 并检测date超时 + // Normal user, non root user authentication successful for files in the root directory, and detection of date timeout { ASSERT_EQ(curvefs_->CheckPathOwner("/file1", "normaluser", "wrongpass", date), @@ -3999,7 +3999,7 @@ TEST_F(CurveFSTest, testCheckPathOwner) { TEST_F(CurveFSTest, testCheckFileOwner) { uint64_t date = TimeUtility::GetTimeofDayUs(); - // root用户,签名匹配 + // Root user, signature matching { std::string filename = "/file1"; std::string str2sig = Authenticator::GetString2Signature(date, @@ -4015,14 
+4015,14 @@ TEST_F(CurveFSTest, testCheckFileOwner) { StatusCode::kOwnerAuthFail); } - // root用户,签名不匹配 + // Root user, signature mismatch { ASSERT_EQ(curvefs_->CheckFileOwner("/file1", authOptions_.rootOwner, "wrongpass", date), StatusCode::kOwnerAuthFail); } - // 普通用户,根目录下的文件非root用户认证成功 + // Normal user, non root user authentication succeeded for files in the root directory { FileInfo fileInfo; fileInfo.set_owner("normaluser"); @@ -4035,7 +4035,7 @@ TEST_F(CurveFSTest, testCheckFileOwner) { "normaluser", "", date), StatusCode::kOK); } - // 普通用户,根目录下的文件非root用户认证失败 + // Ordinary user, non root user authentication failed for files in the root directory { FileInfo fileInfo; fileInfo.set_owner("normaluser"); diff --git a/test/mds/nameserver2/file_lock_test.cpp b/test/mds/nameserver2/file_lock_test.cpp index 25b524d195..676909e9b4 100644 --- a/test/mds/nameserver2/file_lock_test.cpp +++ b/test/mds/nameserver2/file_lock_test.cpp @@ -169,8 +169,8 @@ TEST_F(FileWriteLockGuardTest, LockUnlockTest) { ASSERT_EQ(flm.GetLockEntryNum(), 0); } -// 以下这种情况,跑测试的时候会出现Segmentation fault,是锁的实现机制的问题 -// 要避免这样使用锁,已在代码里进行规避,以下注释的测试保留,提醒使用者注意 +// In the following scenario, a Segmentation fault may occur when running tests, due to issues with the locking mechanism. +// To avoid using locks in this way, precautions have been taken in the code. The commented-out test cases are retained to remind users to be cautious. /* TEST_F(FileWriteLockGuardTest, LockUnlockTest1) { { diff --git a/test/mds/nameserver2/file_record_test.cpp b/test/mds/nameserver2/file_record_test.cpp index 37a728b012..9b9241debc 100644 --- a/test/mds/nameserver2/file_record_test.cpp +++ b/test/mds/nameserver2/file_record_test.cpp @@ -37,15 +37,15 @@ TEST(FileRecordTest, timeout_test) { butil::EndPoint ep; butil::str2endpoint("127.0.0.1:1111", &ep); - // 设置有效时间为1ms + // Set the effective time to 1ms FileRecord record(1 * 1000, "0.0.6", ep); - // 判断超时 + // Judgment timeout ASSERT_EQ(false, record.IsTimeout()); - // 判断版本号 + // Determine version number ASSERT_EQ("0.0.6", record.GetClientVersion()); - // 睡眠一段时间判断超时是否生效 + // Sleep for a period of time to determine if the timeout is effective std::this_thread::sleep_for(std::chrono::milliseconds(15)); ASSERT_EQ(true, record.IsTimeout()); @@ -89,9 +89,9 @@ TEST(FileRecordManagerTest, normal_test) { kInvalidPort); fileRecordManager.UpdateFileRecord("file4", "0.0.6", "127.0.0.1", 1235); - // 总共记录了4个文件 - // 其中一个port为Invalid - // 其中两个文件打开的client ip port相同 + // A total of 4 files were recorded + // One of the ports is Invalid + // Two of the files have the same client IP port opened ASSERT_EQ(2, fileRecordManager.ListAllClient().size()); // ClientIpPortType clientIpPort; @@ -127,7 +127,7 @@ TEST(FileRecordManagerTest, open_file_num_test) { ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); - // 插入两个记录 + // Insert two records fileRecordManager.UpdateFileRecord("file1", "", "127.0.0.1", 0); fileRecordManager.UpdateFileRecord("file2", "", "127.0.0.1", 0); @@ -138,18 +138,18 @@ TEST(FileRecordManagerTest, open_file_num_test) { } }; - // 只对 file1 定期续约 + // Regular renewal only for file1 std::thread th(task, "file1"); - // sleep 50ms后,file2 会超时 + // After 50ms of sleep, file2 will timeout std::this_thread::sleep_for(std::chrono::milliseconds(50)); ASSERT_EQ(1, fileRecordManager.GetOpenFileNum()); - // 停止 file1 的定期续约 + // Stop regular renewal of file1 running = false; th.join(); - // sleep 50ms后,file1 也会超时 + // After 50ms of sleep, file1 will also timeout std::this_thread::sleep_for(std::chrono::milliseconds(50)); 
ASSERT_EQ(0, fileRecordManager.GetOpenFileNum()); diff --git a/test/mds/nameserver2/namespace_service_test.cpp b/test/mds/nameserver2/namespace_service_test.cpp index c5247030f2..f78b2c89b8 100644 --- a/test/mds/nameserver2/namespace_service_test.cpp +++ b/test/mds/nameserver2/namespace_service_test.cpp @@ -285,7 +285,7 @@ TEST_F(NameSpaceServiceTest, test1) { brpc::Controller cntl; uint64_t fileLength = kMiniFileLength; - // 创建file1,owner1 + // Create file1, owner1 request.set_filename("/file1"); request.set_owner("owner1"); request.set_date(TimeUtility::GetTimeofDayUs()); @@ -347,7 +347,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 在一个不存在的目录下创建文件,会失败 kFileNotExists + // Creating a file in a non-existent directory will fail kFileNotExists cntl.Reset(); request.set_filename("/dir4/file4"); request.set_owner("owner4"); @@ -363,7 +363,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 在一个文件下创建文件,会失败 kNotDirectory + // Creating a file under one file will fail kNotDirectory cntl.Reset(); request.set_filename("/file2/file4"); request.set_owner("owner2"); @@ -379,7 +379,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 如果创建一个已经存在的文件,会创建失败kFileExists + // If you create an existing file, it will fail to create kFileExists cntl.Reset(); request.set_filename("/file2"); request.set_poolset(""); @@ -396,7 +396,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 如果创建一个已经存在的目录,会创建失败kFileExists + // If you create an existing directory, it will fail to create kFileExist cntl.Reset(); request.set_filename("/dir"); request.set_owner("owner3"); @@ -412,7 +412,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 创建其他类型文件,返回kNotSupported + // Create other types of files and return kNotSupported cntl.Reset(); request.set_filename("/file4"); request.set_owner("owner4"); @@ -457,7 +457,7 @@ TEST_F(NameSpaceServiceTest, test1) { FAIL(); } - // 创建文件名不规范的文件会失败 + // Creating files with non-standard file names will fail cntl.Reset(); request.set_filename("/file4/"); request.set_owner("owner4"); @@ -567,7 +567,7 @@ TEST_F(NameSpaceServiceTest, test1) { } // test GetOrAllocateSegment - // 为file1分配空间 + // Allocate space for file1 cntl.Reset(); GetOrAllocateSegmentRequest request2; GetOrAllocateSegmentResponse response2; @@ -682,7 +682,7 @@ TEST_F(NameSpaceServiceTest, test1) { // test change owner { - // 当前有文件 /file1(owner1) , /file2(owner2), /dir/file3(owner3) + // There are currently /file1(owner1) , /file2(owner2), /dir/file3(owner3) // changeowner success cntl.Reset(); ChangeOwnerRequest request; @@ -799,7 +799,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // changeowner 文件名不规范,失败 + // changeowner file name is not standardized, failed cntl.Reset(); request.set_filename("/file1/"); request.set_newowner("owner1"); @@ -820,12 +820,12 @@ TEST_F(NameSpaceServiceTest, test1) { } // test RenameFile - // 重命名到根目录下,非root owner,失败 - // fileinfoid不匹配,失败 - // 重命名成功 /dir/file3 -> /dir/file4 - // 原文件不存在,重命名失败 - // 重命名到根目录下,root owner,成功 /dir/file4 -> /file4 - // 文件名不规范,失败 + // Renaming to root directory, not root owner, failed + // fileinfoid mismatch, failed + // Rename successful /dir/file3 -> /dir/file4 + // The original file does not exist, renaming failed + // Rename to the root directory, root owner, successful /dir/file4 -> /file4 + // File name not standardized, failed cntl.Reset(); RenameFileRequest request4; RenameFileResponse response4; @@ -951,8 +951,8 @@ TEST_F(NameSpaceServiceTest, test1) { } // test ExtendFile - // 扩容file2,第一次扩大,成功;第二次缩小,失败 - // 
扩容的文件名不符合规范,失败 + // Extend file2: the first extension succeeds; the second attempt, a shrink, fails + // Extending a file whose name does not meet the specification fails uint64_t newsize = kMiniFileLength * 2; cntl.Reset(); ExtendFileRequest request5; @@ -992,8 +992,8 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // begin session test,开始测试时,有/file1,/file2和/file4 - // OpenFile case1. 文件不存在,返回kFileNotExists + // begin session test; at the start of the test, /file1, /file2 and /file4 exist + // OpenFile case1. File does not exist, returns kFileNotExists cntl.Reset(); OpenFileRequest request8; OpenFileResponse response8; @@ -1008,7 +1008,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // OpenFile case2. 文件存在,没有open过,返回成功、session、fileInfo + // OpenFile case2. The file exists and has not been opened. Success, session, and fileInfo are returned cntl.Reset(); OpenFileRequest request9; OpenFileResponse response9; @@ -1043,7 +1043,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // openFile case3, 文件名不符合规范 + // OpenFile case3, file name does not meet the specification OpenFileRequest request11; OpenFileResponse response11; cntl.Reset(); @@ -1058,7 +1058,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case1. 文件不存在,返回kFileNotExists + // CloseFile case1. File does not exist, returns kFileNotExists cntl.Reset(); CloseFileRequest request12; CloseFileResponse response12; @@ -1074,7 +1074,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case2. 文件存在,session存在,返回成功 + // CloseFile case2. File exists, session exists, returns success CloseFileRequest request13; CloseFileResponse response13; cntl.Reset(); @@ -1092,7 +1092,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // CloseFile case3. 文件名不符合规范 + // CloseFile case3. The file name does not meet the specification cntl.Reset(); request14.set_filename("/file2/"); request14.set_owner("owner2"); @@ -1106,7 +1106,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // RefreshSession case1. 文件不存在,返回kFileNotExists + // RefreshSession case1. File does not exist, returns kFileNotExists cntl.Reset(); ReFreshSessionRequest request15; ReFreshSessionResponse response15; @@ -1124,7 +1124,7 @@ TEST_F(NameSpaceServiceTest, test1) { ASSERT_TRUE(false); } - // RefreshSession case2. 文件名不符合规范 + // RefreshSession case2. 
The file name does not meet the specifications ReFreshSessionRequest request18; ReFreshSessionResponse response18; cntl.Reset(); @@ -1426,7 +1426,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { CurveFSService_Stub stub(&channel); - // 先创建文件/file1,目录/dir1,文件/dir1/file2 + // First create file '/file1', directory '/dir1', file '/dir1/file2' std::vector logicalPools{1, 2, 3}; EXPECT_CALL(*topology_, GetLogicalPoolInCluster(_)) .Times(AtLeast(1)) @@ -1480,7 +1480,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { FAIL(); } - // 查看文件/file1,目录/dir1,文件/dir1/file2的状态 + // View the status of file '/file1', directory '/dir1', and file '/dir1/file2' cntl.Reset(); GetFileInfoRequest request1; GetFileInfoResponse response1; @@ -1539,7 +1539,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 文件/dir1/file2申请segment + // File '/dir1/file2' application segment GetOrAllocateSegmentRequest allocRequest; GetOrAllocateSegmentResponse allocResponse; for (int i = 0; i < 10; i++) { @@ -1558,8 +1558,8 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { } } - // 开始测试删除文件逻辑 - // 1 如果文件有快照,那么删除文件返回kFileUnderSnapShot + // Start testing delete file logic + // 1. If the file has a snapshot, deleting the file returns kFileUnderSnapShot cntl.Reset(); CreateSnapShotRequest snapshotRequest; CreateSnapShotResponse snapshotResponses; @@ -1639,7 +1639,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { << "max attempts for check snapshot status exhausted"; - // 2 如果目录下有文件,那么删除目录返回kDirNotEmpty + // 2. If there are files in the directory, deleting the directory returns kDirNotEmpty cntl.Reset(); request3.set_filename("/dir1"); request3.set_owner("owner"); @@ -1653,7 +1653,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 3 如果传入的fileid不匹配,删除文件失败 + // 3. If the passed in fileids do not match, deleting the file fails cntl.Reset(); DeleteFileRequest request5; DeleteFileResponse response5; @@ -1670,7 +1670,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 4 删除文件/file1成功,查询文件已经删除 + // 4. 
Successfully deleted file '/file1', query file has been deleted cntl.Reset(); request3.set_filename("/file1"); request3.set_owner("owner"); @@ -1696,7 +1696,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 查询垃圾箱 + // Query Trash Bin ListDirRequest listRequest; ListDirResponse listResponse; cntl.Reset(); @@ -1720,7 +1720,7 @@ TEST_F(NameSpaceServiceTest, deletefiletests) { ASSERT_TRUE(false); } - // 删除文件/dir1/file2成功,删除目录/dir1成功,查询目录和文件均已经删除 + // Successfully deleted file '/dir1/file2', deleted directory '/dir1', queried directory and files have been deleted using ::curve::mds::topology::ChunkServerStatus; using ::curve::mds::topology::OnlineState; using ::curve::chunkserver::ChunkRequest; diff --git a/test/mds/schedule/coordinator_test.cpp b/test/mds/schedule/coordinator_test.cpp index b18aa07b31..0d5821903a 100644 --- a/test/mds/schedule/coordinator_test.cpp +++ b/test/mds/schedule/coordinator_test.cpp @@ -117,19 +117,19 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( info.id, &opRes)); - // 第一次下发配置 + // First configuration distribution ASSERT_EQ(4, coordinator->CopySetHeartbeat( testCopySetInfo, ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ(ConfigChangeType::ADD_PEER, res.type()); - // 第二次获取chunkserver失败 + // Failed to obtain chunkserver for the second time ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( testCopySetInfo, ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -146,7 +146,7 @@ TEST(CoordinatorTest, test_AddPeer_CopySetHeartbeat) { info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); @@ -267,20 +267,20 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { Operator opRes; ASSERT_TRUE(coordinator->GetOpController()->GetOperatorById( info.id, &opRes)); - // 第一次下发配置 + // First configuration distribution ASSERT_EQ(4, coordinator->CopySetHeartbeat( testCopySetInfo, ConfigChangeInfo{}, &res)); ASSERT_EQ("127.0.0.1:9000:0", res.configchangeitem().address()); ASSERT_EQ("127.0.0.1:9001:0", res.oldpeer().address()); ASSERT_EQ(ConfigChangeType::CHANGE_PEER, res.type()); - // 第二次获取chunkserver失败 + // Failed to obtain chunkserver for the second time ASSERT_EQ(UNINTIALIZE_ID, coordinator->CopySetHeartbeat( testCopySetInfo, ConfigChangeInfo{}, &res)); } { - // 3. 下发配置,但candidate是offline状态 + // 3. Distribute configuration, but candidate is in offline status EXPECT_CALL(*topoAdapter, CopySetFromTopoToSchedule(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(info), Return(true))); @@ -297,7 +297,7 @@ TEST(CoordinatorTest, test_ChangePeer_CopySetHeartbeat) { info.id, &opRes)); csInfo.state = OnlineState::ONLINE; - // 获取不到chunkserver的信息 + // Unable to obtain chunkserver information ASSERT_TRUE(coordinator->GetOpController()->AddOperator(testOperator)); EXPECT_CALL(*topoAdapter, GetChunkServerInfo(_, _)) .WillOnce(Return(false)); @@ -363,12 +363,12 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { scheduleOption, std::make_shared(topo)); { - // 1. 
copyset上没有要变更的operator + // 1. There are no operators to change on the copyset ASSERT_FALSE(coordinator->ChunkserverGoingToAdd(1, CopySetKey{1, 1})); } { - // 2. copyset上有leader变更,并且目的leader为chunkserver-1 + // 2. There is a leader change on the copyset and the target leader is chunkserver-1 Operator testOperator(1, CopySetKey{1, 1}, OperatorPriority::NormalPriority, steady_clock::now(), @@ -378,7 +378,7 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { } { - // 3. copyset上有remove peer操作 + // 3. There is a remove peer operation on the copyset Operator testOperator(1, CopySetKey{1, 2}, OperatorPriority::NormalPriority, steady_clock::now(), @@ -388,7 +388,7 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { } { - // 4. copyset上有add peer操作, target不是1 + // 4. There is an add peer operation on the copyset, but the target is not 1 Operator testOperator(1, CopySetKey{1, 3}, OperatorPriority::NormalPriority, steady_clock::now(), @@ -398,7 +398,7 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { } { - // 5. copyset上有add peer操作, target是1 + // 5. There is an add peer operation on the copyset, with a target of 1 Operator testOperator(1, CopySetKey{1, 4}, OperatorPriority::NormalPriority, steady_clock::now(), @@ -408,7 +408,7 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { } { - // 6. copyset上有change peer操作,target不是1 + // 6. There is a change peer operation on the copyset, but the target is not 1 Operator testOperator(1, CopySetKey{1, 5}, OperatorPriority::NormalPriority, steady_clock::now(), @@ -418,7 +418,7 @@ TEST(CoordinatorTest, test_ChunkserverGoingToAdd) { } { - // 7. copyset上有change peer操作,target是1 + // 7. There is a change peer operation on the copyset, with a target of 1 Operator testOperator(1, CopySetKey{1, 6}, OperatorPriority::NormalPriority, steady_clock::now(), @@ -484,10 +484,10 @@ TEST(CoordinatorTest, test_RapidLeaderSchedule) { TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { /* - 场景: - chunkserver1: offline 有恢复op - chunkserver2: offline 没有恢复op,没有candidate,有其他op - chunkserver3: offline 有candidate + Scenario: + chunkserver1: offline has recovery op + chunkserver2: offline has no recovery op, no candidate, and other ops + chunkserver3: offline has candidate chunkserver4: online chunkserver4: online */ @@ -496,11 +496,11 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { auto topoAdapter = std::make_shared(); auto coordinator = std::make_shared(topoAdapter); - // 获取option + // Get option ScheduleOption scheduleOption = GetScheduleOption(); coordinator->InitScheduler(scheduleOption, metric); - // 构造chunkserver + // Construct chunkserver std::vector chunkserverInfos; std::vector peerInfos; for (int i = 1; i <= 6; i++) { @@ -519,7 +519,7 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { peerInfos.emplace_back(peer); } - // 构造op + // Construct op Operator opForCopySet1( 1, CopySetKey{1, 1}, OperatorPriority::HighPriority, @@ -534,7 +534,7 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { std::make_shared(2, 4)); ASSERT_TRUE(coordinator->GetOpController()->AddOperator(opForCopySet2)); - // 构造copyset + // Construct a copyset std::vector peersFor2({peerInfos[1], peerInfos[3], peerInfos[4]}); CopySetInfo copyset2( CopySetKey{1, 2}, 1, 4, @@ -556,7 +556,7 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { configChangeInfoForCS3, CopysetStatistics{}); - // 1. 查询所有chunkserver + // 1. 
Query all chunkservers { EXPECT_CALL(*topoAdapter, GetChunkServerInfos()) .WillOnce(Return(chunkserverInfos)); @@ -578,7 +578,7 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { ASSERT_FALSE(statusMap[6]); } - // 2. 查询指定chunkserver, 但chunkserver不存在 + // 2. Query for specified chunkserver, but chunkserver does not exist { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(7, _)) .WillOnce(Return(false)); @@ -589,7 +589,7 @@ TEST(CoordinatorTest, test_QueryChunkServerRecoverStatus) { std::vector{7}, &statusMap)); } - // 3. 查询指定chunkserver, 不在恢复中 + // 3. Query the specified chunkserver, not in recovery { EXPECT_CALL(*topoAdapter, GetChunkServerInfo(6, _)) .WillOnce(DoAll(SetArgPointee<1>(chunkserverInfos[5]), diff --git a/test/mds/schedule/leaderScheduler_test.cpp b/test/mds/schedule/leaderScheduler_test.cpp index 3be00637b0..85ab24d59c 100644 --- a/test/mds/schedule/leaderScheduler_test.cpp +++ b/test/mds/schedule/leaderScheduler_test.cpp @@ -439,7 +439,7 @@ TEST_F(TestLeaderSchedule, test_tranferLeaderout_pendding) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); @@ -521,7 +521,7 @@ TEST_F(TestLeaderSchedule, test_transferLeaderIn_normal) { TEST_F(TestLeaderSchedule, test_transferLeaderIn_pendding) { // chunkserver1 chunkserver2 chunkserver3 chunkserver4 // leaderCount 0 3 2 1 - // copyset 1 1 1(有operator) + // copyset 1 1 1(with operator) // 2 2 2 // 3 3 3 PeerInfo peer1(1, 1, 1, "192.168.10.1", 9000); diff --git a/test/mds/schedule/operatorStep_test.cpp b/test/mds/schedule/operatorStep_test.cpp index 3cab9d2911..118bc1f602 100644 --- a/test/mds/schedule/operatorStep_test.cpp +++ b/test/mds/schedule/operatorStep_test.cpp @@ -238,7 +238,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer = std::make_shared(3, 4); CopySetConf copySetConf; - // 1. change peer还未开始 + // 1. change peer has not yet started { ASSERT_EQ(ApplyStatus::Ordered, changePeer->Apply(originCopySetInfo, ©SetConf)); @@ -248,7 +248,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { } auto testCopySetInfo = originCopySetInfo; - // 2. change peer完成 + // 2. change peer completed { auto testCopySetInfo = originCopySetInfo; testCopySetInfo.peers.erase(testCopySetInfo.peers.begin() + 2); @@ -258,7 +258,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 3. change peer失败 + // 3. change peer failed { testCopySetInfo = originCopySetInfo; testCopySetInfo.candidatePeerInfo = PeerInfo(4, 1, 1, "", 9000); @@ -278,7 +278,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 4. 上报未完成 + // 4. Reporting incomplete { testCopySetInfo.configChangeInfo.set_finished(false); testCopySetInfo.configChangeInfo.release_err(); @@ -286,7 +286,7 @@ TEST(OperatorStepTest, OperatorStepTest_ChangePeer_Test) { changePeer->Apply(testCopySetInfo, ©SetConf)); } - // 5. 上报的变更类型和mds中的oprator不相符合 + // 5. 
The reported change type does not match the operator in mds { testCopySetInfo.configChangeInfo.set_type(ConfigChangeType::ADD_PEER); testCopySetInfo.configChangeInfo.set_finished(true); diff --git a/test/mds/schedule/rapidLeaderSheduler_test.cpp b/test/mds/schedule/rapidLeaderSheduler_test.cpp index 3caecf7111..e1e96c6fa8 100644 --- a/test/mds/schedule/rapidLeaderSheduler_test.cpp +++ b/test/mds/schedule/rapidLeaderSheduler_test.cpp @@ -77,7 +77,7 @@ class TestRapidLeaderSchedule : public ::testing::Test { TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) { std::shared_ptr rapidLeaderScheduler; - // 1. mds没有任何logicalpool + // 1. MDS does not have any logicalpool { rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 2); @@ -93,7 +93,7 @@ TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) { ASSERT_EQ(kScheduleErrCodeSuccess, rapidLeaderScheduler->Schedule()); } - // 2. mds逻辑池列表中没有指定logicalpool + // 2. The specified logicalpool is not in the MDS logical pool list { rapidLeaderScheduler = std::make_shared( opt_, topoAdapter_, opController_, 2); @@ -107,7 +107,7 @@ TEST_F(TestRapidLeaderSchedule, test_logicalPool_not_exist) { TEST_F(TestRapidLeaderSchedule, test_initResource_no_need_schedule) { std::shared_ptr rapidLeaderScheduler; { - // 1. 指定logicalpool中没有chunkserver + // 1. There is no chunkserver in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -121,7 +121,7 @@ TEST_F(TestRapidLeaderSchedule, test_initResource_no_need_schedule) { } { - // 2. 指定logicalpool中没有copyset + // 2. There is no copyset in the specified logicalpool EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -141,7 +141,7 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { opt_, topoAdapter_, opController_, 1); { - // 1. copyset的副本数目为1, 不会产生迁移 + // 1. The copyset has only 1 replica, so no migration is generated EXPECT_CALL(*topoAdapter_, GetLogicalpools()) .WillOnce(Return(std::vector{1})); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(1)) @@ -158,7 +158,7 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { } { - // 2. chunkserver上拥有的leader数目最多相差1, 不会产生迁移 + // 2. 
The leader counts on the chunkservers differ by at most 1, so no migration is generated // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 EXPECT_CALL(*topoAdapter_, GetLogicalpools()) @@ -175,7 +175,7 @@ TEST_F(TestRapidLeaderSchedule, test_select_target_fail) { } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 @@ -217,7 +217,7 @@ TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_success) { } TEST_F(TestRapidLeaderSchedule, test_rapid_schedule_pendding) { - // 快速均衡成功 + // Fast balancing successful // chunkserver-1 chunkserver-2 chunkserver-3 // copyset-1(leader) copyset-1 copyset-1 // copyset-2(leader) copyset-2 copyset-2 diff --git a/test/mds/schedule/recoverScheduler_test.cpp b/test/mds/schedule/recoverScheduler_test.cpp index c7c11b299e..0ec170f4d2 100644 --- a/test/mds/schedule/recoverScheduler_test.cpp +++ b/test/mds/schedule/recoverScheduler_test.cpp @@ -218,7 +218,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { EXPECT_CALL(*topoAdapter_, GetAvgScatterWidthInLogicalPool(_)) .WillRepeatedly(Return(90)); { - // 1. 所有chunkserveronline + // 1. All chunkservers are online EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), Return(true))); @@ -233,7 +233,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } { - // 2. 副本数量大于标准,leader挂掉 + // 2. The number of replicas exceeds the standard, and the leader goes offline csInfo1.state = OnlineState::OFFLINE; EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(id1, _)) .WillOnce(DoAll(SetArgPointee<1>(csInfo1), @@ -248,7 +248,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } { - // 3. 副本数量大于标准,follower挂掉 + // 3. The number of replicas exceeds the standard, and a follower goes offline opController_->RemoveOperator(op.copysetID); csInfo1.state = OnlineState::ONLINE; csInfo2.state = OnlineState::OFFLINE; @@ -265,7 +265,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } { - // 4. 副本数目等于标准, follower挂掉 + // 4. The number of replicas equals the standard, and a follower goes offline opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); @@ -297,7 +297,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } { - // 5. 选不出替换chunkserver + // 5. Unable to select a replacement chunkserver opController_->RemoveOperator(op.copysetID); EXPECT_CALL(*topoAdapter_, GetChunkServersInLogicalPool(_)) .WillOnce(Return(std::vector{})); @@ -306,7 +306,7 @@ TEST_F(TestRecoverSheduler, test_all_chunkServer_online_offline) { } { - // 6. 在chunkserver上创建copyset失败 + // 6. Failed to create copyset on chunkserver EXPECT_CALL(*topoAdapter_, GetStandardReplicaNumInLogicalPool(_)) .WillRepeatedly(Return(3)); std::vector chunkserverList( diff --git a/test/mds/schedule/scheduleMetrics_test.cpp b/test/mds/schedule/scheduleMetrics_test.cpp index 66969a6845..5ca3fc21f1 100644 --- a/test/mds/schedule/scheduleMetrics_test.cpp +++ b/test/mds/schedule/scheduleMetrics_test.cpp @@ -80,7 +80,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { addCsInfo.SetCopySetMembers(std::set{1, 2}); { - // 1. 增加normal级别/add类型的operator + // 1. 
Add operators of normal level/add type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 1}, _)) .WillOnce(DoAll(SetArgPointee<1>(addCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) @@ -143,7 +143,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_addOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(addOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->addOpNum.get_value()); @@ -160,7 +160,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { rmCsInfo.SetLeader(1); { - // 1. 增加high级别/remove类型的operator + // 1. Add high level/remove type operators EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 2}, _)) .WillOnce(DoAll(SetArgPointee<1>(rmCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) @@ -226,7 +226,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_rmOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(rmOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->removeOpNum.get_value()); @@ -243,7 +243,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { transCsInfo.SetLeader(1); { - // 1. 增加normal级别/transferleader类型的operator + // 1. Increase the operator of the normal level/transferleader type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) @@ -307,14 +307,14 @@ TEST_F(ScheduleMetricsTest, test_add_rm_transferOp) { } { - // 2. 移除 1中的operator + // 2. Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(transferOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->transferOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the map scheduleMetrics->UpdateRemoveMetric(transferOp); } } @@ -327,7 +327,7 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { changeCsInfo.SetLeader(1); { - // 1. 增加normal级别/changePeer类型的operator + // 1. Increase operator of normal level/changePeer type EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 4}, _)) .WillOnce(DoAll(SetArgPointee<1>(changeCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) @@ -393,14 +393,14 @@ TEST_F(ScheduleMetricsTest, test_add_rm_changeOp) { } { - // 2. 移除 1中的operator + // 2. 
Remove operator from 1 scheduleMetrics->UpdateRemoveMetric(changeOp); ASSERT_EQ(0, scheduleMetrics->operatorNum.get_value()); ASSERT_EQ(0, scheduleMetrics->changeOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->normalOpNum.get_value()); ASSERT_EQ(0, scheduleMetrics->operators.size()); - // 移除map中不存在的metric应该没有问题 + // There should be no problem removing metrics that do not exist in the map scheduleMetrics->UpdateRemoveMetric(changeOp); } } @@ -412,7 +412,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { transCsInfo.SetCopySetMembers(std::set{1, 2, 3}); transCsInfo.SetLeader(1); - // 获取copyset失败 + // Failed to obtain copyset EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)).WillOnce(Return(false)); scheduleMetrics->UpdateAddMetric(transferOp); ASSERT_EQ(1, scheduleMetrics->operatorNum.get_value()); @@ -426,7 +426,7 @@ TEST_F(ScheduleMetricsTest, test_abnormal) { scheduleMetrics->UpdateRemoveMetric(transferOp); - // 获取chunkserver 或者 server失败 + // Failed to obtain chunkserver or server EXPECT_CALL(*topo, GetCopySet(CopySetKey{1, 3}, _)) .WillOnce(DoAll(SetArgPointee<1>(transCsInfo), Return(true))); EXPECT_CALL(*topo, GetHostNameAndPortById(1)) diff --git a/test/mds/schedule/scheduleService/scheduleService_test.cpp b/test/mds/schedule/scheduleService/scheduleService_test.cpp index 9814f8ce0b..416c2e05d9 100644 --- a/test/mds/schedule/scheduleService/scheduleService_test.cpp +++ b/test/mds/schedule/scheduleService/scheduleService_test.cpp @@ -75,7 +75,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { request.set_logicalpoolid(1); RapidLeaderScheduleResponse response; - // 1. 快速leader均衡返回成功 + // 1. Rapid leader balancing returns success { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeSuccess)); @@ -85,7 +85,7 @@ TEST_F(TestScheduleService, test_RapidLeaderSchedule) { ASSERT_EQ(kScheduleErrCodeSuccess, response.statuscode()); } - // 2. 传入的logicalpoolid不存在 + // 2. The logicalpoolid passed in does not exist { EXPECT_CALL(*coordinator_, RapidLeaderSchedule(1)) .WillOnce(Return(kScheduleErrCodeInvalidLogicalPool)); @@ -105,7 +105,7 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { request.add_chunkserverid(1); QueryChunkServerRecoverStatusResponse response; - // 1. 查询chunkserver恢复状态返回成功 + // 1. Querying the chunkserver recovery status returns success { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( @@ -121,7 +121,7 @@ TEST_F(TestScheduleService, test_QueryChunkServerRecoverStatus) { ASSERT_TRUE(response.recoverstatusmap().begin()->second); } - // 2. 
The chunkserverid passed in is illegal { std::map expectRes{{1, 1}}; EXPECT_CALL(*coordinator_, QueryChunkServerRecoverStatus( diff --git a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp index b8b3ddb148..c8d2950124 100644 --- a/test/mds/schedule/schedulerPOC/scheduler_poc.cpp +++ b/test/mds/schedule/schedulerPOC/scheduler_poc.cpp @@ -401,7 +401,7 @@ class CopysetSchedulerPOC : public testing::Test { void TearDown() override {} void PrintScatterWithInOnlineChunkServer(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; std::map factorMap; int max = -1; @@ -437,7 +437,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + // Print variance of scatter-with LOG(INFO) << "scatter-with (online chunkserver): " << factorMap.size(); float avg = static_cast(sumFactor) / factorMap.size(); float variance = 0; @@ -446,14 +446,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << "Mean: " << avg << ", Variance: " << variance << ", Standard Deviation: " + << std::sqrt(variance) << ", Maximum Value: (" << max << "," << maxId << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintScatterWithInLogicalPool(PoolIdType lid = 0) { - // 打印初始每个chunkserver的scatter-with + // Print the initial scatter with for each chunkserver int sumFactor = 0; int max = -1; int maxId = -1; @@ -477,7 +477,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", scatter-with:" << factor; } - // 打印scatter-with的方差 + // Print variance of scatter-with LOG(INFO) << "scatter-with (all chunkserver): " << factorMap.size(); float avg = static_cast(sumFactor) / factorMap.size(); float variance = 0; @@ -486,14 +486,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= factorMap.size(); LOG(INFO) << "###print scatter-with in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << "Mean: " << avg << ", Variance: " << variance << ", Standard Deviation: " + << std::sqrt(variance) << ", Maximum Value: (" << max << "," << maxId << ")" - << ", 最小值:(" << min << "," << minId << ")"; + << ", Minimum Value: (" << min << "," << minId << ")"; } void PrintCopySetNumInOnlineChunkServer(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunksever std::map numberMap; int sumNumber = 0; int max = -1; @@ -526,7 +526,7 @@ class CopysetSchedulerPOC : public testing::Test { << ", copyset num:" << number; } - // 打印方差 + // Print Variance float avg = static_cast(sumNumber) / static_cast(numberMap.size()); float variance = 0; @@ -535,14 +535,14 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in online chunkserver###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << "Mean: " << avg << ", Variance: " << variance << ", Standard Deviation: " + << std::sqrt(variance) << ", Maximum Value: (" << max << "," << maxId << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << "), Minimum Value: (" << min << "," << minId 
<< ")"; } void PrintCopySetNumInLogicalPool(PoolIdType lid = 0) { - // 打印每个chunksever上copyset的数量 + // Print the number of copysets on each chunkserver std::map numberMap; int sumNumber = 0; int max = -1; @@ -561,7 +561,7 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 打印方差 + // Print Variance float avg = static_cast(sumNumber) / static_cast(numberMap.size()); float variance = 0; @@ -570,13 +570,13 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= numberMap.size(); LOG(INFO) << "###print copyset-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值: " << max << ", 最小值:" + << "Mean: " << avg << ", Variance: " << variance << ", Standard Deviation: " + << std::sqrt(variance) << ", Maximum Value: " << max << ", Minimum Value: " << min; } void PrintLeaderCountInChunkServer(PoolIdType lid = 0) { - // 打印每个chunkserver上leader的数量 + // Print the number of leaders on each chunkserver std::map leaderDistribute; int sumNumber = 0; int max = -1; @@ -612,10 +612,10 @@ class CopysetSchedulerPOC : public testing::Test { } variance /= leaderDistribute.size(); LOG(INFO) << "###print leader-num in cluster###\n" - << "均值:" << avg << ", 方差:" << variance << ", 标准差: " - << std::sqrt(variance) << ", 最大值:(" << max << "," << maxId + << "Mean: " << avg << ", Variance: " << variance << ", Standard Deviation: " - << std::sqrt(variance) << ", Maximum Value: (" << max << "," << maxId << ")" - << "), 最小值:(" << min << "," << minId << ")"; + << ", Minimum Value: (" << min << "," << minId << ")"; } int GetLeaderCountRange(PoolIdType lid = 0) { @@ -637,16 +637,16 @@ class CopysetSchedulerPOC : public testing::Test { return max - min; } - // 计算每个chunkserver的scatter-with + // Calculate the scatter-with for each chunkserver int GetChunkServerScatterwith(ChunkServerIdType csId) { - // 计算chunkserver上的scatter-with + // Calculate the scatter-with on the chunkserver std::map chunkServerCount; for (auto it : topo_->GetCopySetsInChunkServer(csId)) { // get copyset info ::curve::mds::topology::CopySetInfo info; topo_->GetCopySet(it, &info); - // 统计所分布的chunkserver + // Count the chunkservers these copysets are distributed on for (auto it : info.GetCopySetMembers()) { if (it == csId) { continue; } @@ -673,11 +673,11 @@ class CopysetSchedulerPOC : public testing::Test { ChunkServerIdType RandomOfflineOneChunkServer(PoolIdType lid = 0) { auto chunkServers = topo_->GetChunkServerInLogicalPool(lid); - // 选择[0, chunkServers.size())中的index + // Select an index in [0, chunkServers.size()) std::srand(std::time(nullptr)); int index = std::rand() % chunkServers.size(); - // 设置目标chunkserver的状态为offline + // Set the status of the target chunkserver to offline auto it = chunkServers.begin(); std::advance(it, index); topo_->UpdateChunkServerOnlineState(OnlineState::OFFLINE, *it); @@ -781,8 +781,8 @@ class CopysetSchedulerPOC : public testing::Test { } } - // 有两个chunkserver offline的停止条件: - // 所有copyset均有两个及以上的副本offline + // Stop condition when two chunkservers are offline: + // every copyset has two or more replicas offline bool SatisfyStopCondition(const std::set &idList) { std::vector<::curve::mds::topology::CopySetKey> copysetList; for (auto id : idList) { @@ -831,58 +831,58 @@ class CopysetSchedulerPOC : public testing::Test { }; TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_1) { - // 测试一个chunkserver offline恢复后的情况 - // 1. 创建recoverScheduler + // Test the recovery after one chunkserver goes offline + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 
任意选择一个chunkserver处于offline状态 + // 2. Select any chunkserver to be offline ChunkServerIdType choose = RandomOfflineOneChunkServer(); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operator until there is no copyset on choose do { recoverScheduler_->Schedule(); // update copyset to topology ApplyOperatorsInOpController(std::set{choose}); } while (topo_->GetCopySetsInChunkServer(choose).size() > 0); - // 4. 打印最终的scatter-with + // 4. Print the final scatter with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // =============================结果====================================== - // ===========================集群初始状态================================= + // =============================Result====================================== + // =============================Initial state of the cluster============================= // ###print scatter-with in cluster### - // 均值:97.9556, 方差:11.5314, 标准差: 3.39579, 最大值:106, 最小值:88 + // Mean: 97.9556, Variance: 11.5314, Standard Deviation: 3.39579, Max: 106, Min: 88 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // ==========================恢复之后的状态================================= + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 + // =============================Status after Recovery================================= // //NOLINT // ###print scatter-with in online chunkserver### - // 均值:均值:98.8156, 方差:10.3403, 标准差: 3.21564, 最大值:106, - // 最小值:95 //NOLINT + // Mean: 98.8156, variance: 10.3403, standard deviation: 3.21564, maximum value: 106, + // Minimum value: 95//NOLINT // ###print scatter-with in cluster### - // 均值:98.2667, 方差:64.2289, 标准差: 8.0143, 最大值:106, 最小值:0 + // Mean: 98.2667, Variance: 64.2289, Standard Deviation: 8.0143, Max: 106, Min: 0 // ###print copyset-num in online chunkserver### - // 均值:100.559, 方差:1.77729, 标准差: 1.33315, 最大值:109, 最小值:100 + // Mean value: 100.559, variance: 1.77729, standard deviation: 1.33315, maximum value: 109, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.6333, 标准差: 7.59166, 最大值: 109, 最小值:0 + // Mean value: 100, variance: 57.6333, standard deviation: 7.59166, maximum value: 109, minimum value: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { - // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline的情况 - // 1. 创建recoverScheduler + // Testing the situation of another chunkserver offline during the recovery process of one chunkserver offline + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set idlist; ChunkServerIdType choose1 = 0; ChunkServerIdType choose2 = 0; choose1 = RandomOfflineOneChunkServer(); idlist.emplace(choose1); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operator until there is no copyset on choose do { recoverScheduler_->Schedule(); @@ -896,35 +896,35 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_2) { ApplyOperatorsInOpController(std::set{choose2}); } while (!SatisfyStopCondition(idlist)); - // 4. 打印最终的scatter-with + // 4. 
Print the final scatter-with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果=================================== - // =========================集群初始状态=============================== + // ===================================Result=================================== + // ===================================Initial state of the cluster=============================== // ###print scatter-with in cluster### - // 均值:97.3, 方差:9.89889, 标准差:3.14625, 最大值:106, 最小值:89 + // Mean value: 97.3, variance: 9.89889, standard deviation: 3.14625, maximum value: 106, minimum value: 89 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // =========================恢复之后的状态============================== + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 + // ===================================Status after Recovery============================== // ###print scatter-with in online chunkserver### - // 均值:100.348, 方差:7.47418, 标准差: 2.73389, 最大值:108, 最小值:101 + // Mean value: 100.348, variance: 7.47418, standard deviation: 2.73389, maximum value: 108, minimum value: 101 // ###print scatter-with in cluster### - // 均值:99.2333, 方差:118.034, 标准差: 10.8644, 最大值:108, 最小值:0 + // Mean value: 99.2333, variance: 118.034, standard deviation: 10.8644, maximum value: 108, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:101.124, 方差:2.9735, 标准差: 1.72438, 最大值:112, 最小值:100 + // Mean value: 101.124, variance: 2.9735, standard deviation: 1.72438, maximum value: 112, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:115.3, 标准差: 10.7378, 最大值: 112, 最小值:0 + // Mean value: 100, variance: 115.3, standard deviation: 10.7378, maximum value: 112, minimum value: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { - // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline - // 1. 创建recoverScheduler + // During the recovery process of testing a chunkserver offline, there were 5 consecutive chunkserver offline + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set idlist; std::vector origin; for (int i = 0; i < 6; i++) { @@ -934,7 +934,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operator until there is no copyset on choose do { recoverScheduler_->Schedule(); @@ -950,35 +950,35 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_3) { ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); - // 4. 打印最终的scatter-with + // 4. 
Print the final scatter-with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ====================================Result==================================== + // ====================================Initial state of the cluster================================= // ###print scatter-with in cluster### - // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87 + // Mean value: 97.6, variance: 11.8067, standard deviation: 3.43608, maximum value: 105, minimum value: 87 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ========================恢复之后的状态================================ + // ====================================Status after Recovery================================ // ###print scatter-with in online chunkserver### - // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103 + // Mean value: 105.425, variance: 9.95706, standard deviation: 3.15548, maximum value: 116, minimum value: 103 // ###print scatter-with in cluster### - // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0 + // Mean value: 101.933, variance: 363.262, standard deviation: 19.0594, maximum value: 116, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100 + // Mean value: 103.425, variance: 13.164, standard deviation: 3.62822, maximum value: 121, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0 + // Mean value: 100, variance: 352.989, standard deviation: 18.788, maximum value: 121, minimum value: 0 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { - // 测试20个chunkserver 接连 offline - // 1. 创建recoverScheduler + // Test 20 chunkservers going offline one after another + // 1. Create recoverScheduler BuilRecoverScheduler(1); - // 2. 任意选择两个chunkserver处于offline状态 + // 2. Choose any two chunkservers to be offline std::set idlist; std::vector origin; for (int i = 0; i < 20; i++) { @@ -988,7 +988,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operator until there is no copyset on choose do { recoverScheduler_->Schedule(); @@ -1004,7 +1004,7 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { ApplyOperatorsInOpController(idlist); } while (!SatisfyStopCondition(idlist)); - // 4. 
Print the final scatter-with PrintScatterWithInOnlineChunkServer(); PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); @@ -1012,24 +1012,24 @@ TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_recover_4) { } TEST_F(CopysetSchedulerPOC, test_chunkserver_offline_over_concurrency) { - // 测试一个server有多个chunkserver offline, 有一个被设置为pending, - // 可以recover的情况 + // Test a server that has multiple chunkservers offline, where one is set to pending + // and can be recovered offlineTolerent_ = 20; BuilRecoverScheduler(4); - // offline一个server上的chunkserver + // Take the chunkservers on one server offline auto chunkserverSet = OfflineChunkServerInServer1(); - // 选择其中一个设置为pendding状态 + // Pick one of them and set it to pendding status ChunkServerIdType target = *chunkserverSet.begin(); topo_->UpdateChunkServerRwState(ChunkServerStatus::PENDDING, target); int opNum = 0; int targetOpNum = topo_->GetCopySetsInChunkServer(target).size(); - // 开始恢复 + // Start recovery do { recoverScheduler_->Schedule(); opNum += opController_->GetOperators().size(); - // apply operator, 把copyset更新到topology + // Apply operator, update copyset to topology ApplyOperatorsInOpController(std::set{target}); } while (topo_->GetCopySetsInChunkServer(target).size() > 0); @@ -1038,14 +1038,14 @@ TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_1) { // NOLINT - // 测试一个chunkserver offline, 集群回迁的情况 + // Test the case where one chunkserver goes offline and the cluster then migrates copysets back - // 1. 一个chunkserver offline后恢复 + // 1. Restore after a chunkserver is offline BuilRecoverScheduler(1); ChunkServerIdType choose = RandomOfflineOneChunkServer(); do { recoverScheduler_->Schedule(); - // apply operator, 把copyset更新到topology + // Apply operator, update copyset to topology ApplyOperatorsInOpController(std::set{choose}); } while (topo_->GetCopySetsInChunkServer(choose).size() > 0); @@ -1053,23 +1053,23 @@ TEST_F(CopysetSchedulerPOC, PrintScatterWithInLogicalPool(); PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ====================================Result==================================== + // ====================================Initial state of the cluster================================= // ###print scatter-with in cluster### - // 均值:97.6667, 方差:10.9444, 标准差: 3.30824, 最大值:107, 最小值:90 + // Mean value: 97.6667, variance: 10.9444, standard deviation: 3.30824, maximum value: 107, minimum value: 90 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 + // Mean: 100, Variance: 0, Standard Deviation: 0, Max: 100, Min: 100 - // ========================恢复之后的状态================================ + // ====================================Status after Recovery================================ // ###print scatter-with in online chunkserver### - // 均值:99.1061, 方差:10.1172, 标准差: 3.18076, 最大值:108, 最小值:91 + // Mean value: 99.1061, variance: 10.1172, standard deviation: 3.18076, maximum value: 108, minimum value: 91 // ###print scatter-with in cluster### - // 均值:98.5556, 方差:64.3247, 标准差: 8.02027, 最大值:108, 最小值:0 + // Mean value: 98.5556, variance: 64.3247, standard deviation: 8.02027, maximum value: 108, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:100.559, 方差:1.56499, 标准差: 1.251, 最大值:107, 最小值:100 + // Mean value: 100.559, variance: 1.56499, standard deviation: 
1.251, maximum value: 107, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:57.4222, 标准差: 7.57774, 最大值: 107, 最小值:0 + // Mean value: 100, variance: 57.4222, standard deviation: 7.57774, maximum value: 107, minimum value: 0 - // 2. chunkserver-choose恢复成online状态 + // 2. Restore chunkserver-choose to the online state SetChunkServerOnline(choose); BuildCopySetScheduler(1); std::vector csList; @@ -1087,20 +1087,20 @@ TEST_F(CopysetSchedulerPOC, minScatterwidth_ * (1 + scatterwidthPercent_)); ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); - // ============================结果==================================== - // ========================迁移后的状态================================= + // ====================================Result==================================== + // ====================================Status after Migration================================= // ###print scatter-with in cluster### - // 均值:99.2667, 方差:9.65111, 标准差: 3.10662, 最大值:109, 最小值:91 + // Mean value: 99.2667, variance: 9.65111, standard deviation: 3.10662, maximum value: 109, minimum value: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:0.5, 标准差: 0.707107, 最大值: 101, 最小值:91 + // Mean value: 100, variance: 0.5, standard deviation: 0.707107, maximum value: 101, minimum value: 91 } TEST_F(CopysetSchedulerPOC, DISABLED_test_scatterwith_after_copysetRebalance_2) { // NOLINT - // 测试一个chunkserver offline恢复过程中,另一个chunkserver offline - // 集群回迁的情况 + // Test the case where another chunkserver goes offline while one chunkserver is recovering, + // and the cluster then migrates copysets back - // 1. chunkserver offline后恢复 + // 1. Recover after the chunkserver goes offline BuilRecoverScheduler(1); std::set idlist; ChunkServerIdType choose1 = 0; @@ -1124,23 +1124,23 @@ TEST_F(CopysetSchedulerPOC, PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果=================================== - // =========================集群初始状态=============================== + // ===================================Result=================================== + // ===================================Initial state of the cluster=============================== // ###print scatter-with in cluster### - // 均值:97.4889, 方差:9.96099, 标准差: 3.1561, 最大值:105, 最小值:89 + // Mean value: 97.4889, variance: 9.96099, standard deviation: 3.1561, maximum value: 105, minimum value: 89 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // =========================恢复之后的状态============================== + // Mean value: 100, variance: 0, standard deviation: 0, maximum value: 100, minimum value: 100 + // ===================================Status after Recovery============================== // ###print scatter-with in online chunkserver### - // 均值:100.472, 方差:7.37281, 标准差: 2.71529, 最大值:106, 最小值:91 + // Mean value: 100.472, variance: 7.37281, standard deviation: 2.71529, maximum value: 106, minimum value: 91 // ###print scatter-with in cluster### - // 均值:99.3556, 方差:118.207, 标准差: 10.8723, 最大值:106, 最小值:0 + // Mean value: 99.3556, variance: 118.207, standard deviation: 10.8723, maximum value: 106, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:101.124, 方差:2.77125, 标准差: 1.66471, 最大值:111, 最小值:100 + // Mean value: 101.124, variance: 2.77125, standard deviation: 1.66471, maximum value: 111, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:115.1, 标准差: 10.7285, 最大值: 111, 最小值:0 + // Mean value: 100, variance: 115.1, standard deviation: 10.7285, maximum value: 111, minimum value: 0 
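The result blocks above summarize the per-chunkserver scatter-width and copyset-num distributions by their mean, variance, standard deviation, maximum and minimum. A minimal sketch of that arithmetic, assuming a hypothetical PrintDistribution helper fed with the per-chunkserver values collected from the topology (the helper name and interface are illustrative, not the POC test's actual print functions):

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Report a per-chunkserver metric (e.g. scatter-width) the way the result
// comments above do: mean, variance, standard deviation, max and min.
void PrintDistribution(const std::vector<double>& values) {
    if (values.empty()) return;
    double sum = 0;
    for (double v : values) sum += v;
    const double mean = sum / values.size();
    double var = 0;
    for (double v : values) var += (v - mean) * (v - mean);
    var /= values.size();  // population variance over all chunkservers
    const double stdev = std::sqrt(var);
    const double maxv = *std::max_element(values.begin(), values.end());
    const double minv = *std::min_element(values.begin(), values.end());
    std::printf("mean:%g, variance:%g, stdev:%g, max:%g, min:%g\n",
                mean, var, stdev, maxv, minv);
}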
- // 2. cchunkserver恢复成online状态 + // 2. Restore the chunkservers to the online state SetChunkServerOnline(choose1); SetChunkServerOnline(choose2); BuildCopySetScheduler(1); @@ -1152,20 +1152,20 @@ TEST_F(CopysetSchedulerPOC, } while (removeOne > 0); PrintScatterWithInLogicalPool(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================迁移后的状态================================= + // ===================================Result==================================== + // ===================================Status after Migration================================= // ###print scatter-with in cluster### - // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91 + // Mean value: 100.556, variance: 8.18025, standard deviation: 2.86011, maximum value: 107, minimum value: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 + // Mean value: 100, variance: 1, standard deviation: 1, maximum value: 101, minimum value: 91 } TEST_F(CopysetSchedulerPOC, test_scatterwith_after_copysetRebalance_3) { // NOLINT - // 测试一个chunkserver offline恢复过程中,接连有5个chunkserver offline - // 回迁的情况 + // Test the case where 5 more chunkservers go offline one after another while one chunkserver is + // recovering, and the cluster then migrates copysets back - // 1. chunkserver offline后恢复 + // 1. Recover after the chunkserver goes offline BuilRecoverScheduler(1); std::set idlist; std::vector origin; @@ -1176,7 +1176,7 @@ TEST_F(CopysetSchedulerPOC, origin[0] = RandomOfflineOneChunkServer(); idlist.emplace(origin[0]); - // 3. 生成operator直到choose上没有copyset为止 + // 3. Generate operators until there is no copyset left on the chosen chunkserver do { recoverScheduler_->Schedule(); @@ -1197,23 +1197,23 @@ TEST_F(CopysetSchedulerPOC, PrintCopySetNumInOnlineChunkServer(); PrintCopySetNumInLogicalPool(); - // ============================结果==================================== - // ========================集群初始状态================================= + // ===================================Result==================================== + // ===================================Initial state of the cluster================================= // ###print scatter-with in cluster### - // 均值:97.6, 方差:11.8067, 标准差: 3.43608, 最大值:105, 最小值:87 + // Mean value: 97.6, variance: 11.8067, standard deviation: 3.43608, maximum value: 105, minimum value: 87 // ###print copyset-num in cluster### - // 均值:100, 方差:0, 标准差: 0, 最大值: 100, 最小值:100 - // ========================恢复之后的状态================================ + // Mean value: 100, variance: 0, standard deviation: 0, maximum value: 100, minimum value: 100 + // ===================================Status after Recovery================================ // ###print scatter-with in online chunkserver### - // 均值:105.425, 方差:9.95706, 标准差: 3.15548, 最大值:116, 最小值:103 + // Mean value: 105.425, variance: 9.95706, standard deviation: 3.15548, maximum value: 116, minimum value: 103 // ###print scatter-with in cluster### - // 均值:101.933, 方差:363.262, 标准差: 19.0594, 最大值:116, 最小值:0 + // Mean value: 101.933, variance: 363.262, standard deviation: 19.0594, maximum value: 116, minimum value: 0 // ###print copyset-num in online chunkserver### - // 均值:103.425, 方差:13.164, 标准差: 3.62822, 最大值:121, 最小值:100 + // Mean value: 103.425, variance: 13.164, standard deviation: 3.62822, maximum value: 121, minimum value: 100 // ###print copyset-num in cluster### - // 均值:100, 方差:352.989, 标准差: 18.788, 最大值: 121, 最小值:0 + // Mean value: 100, variance: 352.989, standard deviation: 18.788, maximum value: 121, minimum value: 0 - // 2. chunkserver恢复成online状态 + // 2. 
Chunkserver restored to online state SetChunkServerOnline(idlist); BuildCopySetScheduler(1); std::vector csList; @@ -1235,12 +1235,12 @@ TEST_F(CopysetSchedulerPOC, ASSERT_TRUE(GetChunkServerScatterwith(choose) >= minScatterwidth_); } - // ============================结果==================================== - // ========================迁移后的状态================================= + // ===================================Result==================================== + // ===================================Status after Migration================================= // ###print scatter-with in cluster### - // 均值:100.556, 方差:8.18025, 标准差: 2.86011, 最大值:107, 最小值:91 + // Mean value: 100.556, variance: 8.18025, standard deviation: 2.86011, maximum value: 107, minimum value: 91 // ###print copyset-num in cluster### - // 均值:100, 方差:1, 标准差: 1, 最大值: 101, 最小值:91 + // Mean: 100, Variance: 1, Standard Deviation: 1, Maximum: 101, Minimum: 91 } TEST_F(CopysetSchedulerPOC, diff --git a/test/mds/schedule/scheduler_helper_test.cpp b/test/mds/schedule/scheduler_helper_test.cpp index ff54d4c5bf..0a3ae43dd3 100644 --- a/test/mds/schedule/scheduler_helper_test.cpp +++ b/test/mds/schedule/scheduler_helper_test.cpp @@ -56,63 +56,63 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = true; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it increased the scatter-width int oldValue = 10; int newValue = 13; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. After the change, the minimum value is not reached, and the scatter-width remains unchanged int oldValue = 10; int newValue = 10; ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the scatter-width decreased int oldValue = 10; int newValue = 8; ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. 
After the change, it is greater than the maximum value, and the scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. After the change is greater than the maximum value, the scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, @@ -126,63 +126,63 @@ TEST_F(TestSchedulerHelper, test_SatisfyScatterWidth_not_target) { int maxScatterWidth = minScatterWidth * (1 + scatterWidthRangePerent); bool target = false; { - // 1. 变更之后未达到最小值,但使得scatter-width增大 + // 1. After the change, the minimum value was not reached, but it increased the scatter-width int oldValue = 10; int newValue = 13; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 2. 变更之后未达到最小值,scattter-width不变 + // 2. After the change, the minimum value is not reached, and the scatter-width remains unchanged int oldValue = 10; int newValue = 10; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 3. 变更之后未达到最小值,scatter-width减小 + // 3. After the change, the minimum value was not reached and the scatter-width decreased int oldValue = 10; int newValue = 8; ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 4. 变更之后等于最小值 + // 4. Equal to minimum value after change int oldValue = minScatterWidth + 2; int newValue = minScatterWidth; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 5. 变更之后大于最小值,小于最大值 + // 5. After the change, it is greater than the minimum value and less than the maximum value int oldValue = minScatterWidth; int newValue = minScatterWidth + 2; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 6. 变更之后等于最大值 + // 6. Equal to maximum value after change int oldValue = maxScatterWidth - 2; int newValue = maxScatterWidth; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 7. 变更之后大于最大值,scatter-width增大 + // 7. After the change, it is greater than the maximum value and the scatter-width increases int oldValue = maxScatterWidth + 1; int newValue = maxScatterWidth + 2; ASSERT_FALSE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 8. 变更之后大于最大值,scatter-width不变 + // 8. After the change, it is greater than the maximum value, and the scatter-width remains unchanged int oldValue = maxScatterWidth + 2; int newValue = maxScatterWidth + 2; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, newValue, minScatterWidth, scatterWidthRangePerent)); } { - // 9. 变更之后大于最大值,scatter-width减小 + // 9. After the change is greater than the maximum value, the scatter-width decreases int oldValue = maxScatterWidth + 3; int newValue = maxScatterWidth + 2; ASSERT_TRUE(SchedulerHelper::SatisfyScatterWidth(target, oldValue, @@ -195,7 +195,7 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { ChunkServerIdType source = 1; ChunkServerIdType target = 4; { - // 1. 获取target的信息失败 + // 1. 
Failed to obtain information for target EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(Return(false)); ASSERT_FALSE(SchedulerHelper::SatisfyZoneAndScatterWidthLimit( @@ -206,7 +206,7 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { ChunkServerInfo info4(peer4, OnlineState::ONLINE, DiskState::DISKERROR, ChunkServerStatus::READWRITE, 1, 1, 1, ChunkServerStatisticInfo{}); { - // 2. 获取到的标准zoneNum = 0 + // 2. Obtained standard zoneNum=0 EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) @@ -216,7 +216,7 @@ TEST_F(TestSchedulerHelper, test_SatisfyZoneAndScatterWidthLimit) { } { - // 3. 迁移之后不符合zone条件 + // 3. Does not meet zone conditions after migration EXPECT_CALL(*topoAdapter_, GetChunkServerInfo(4, _)) .WillOnce(DoAll(SetArgPointee<1>(info4), Return(true))); EXPECT_CALL(*topoAdapter_, GetStandardZoneNumInLogicalPool(1)) @@ -285,16 +285,16 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration) { .WillOnce(SetArgPointee<1>(replica3Map)); SchedulerHelper::CalculateAffectOfMigration( copyset, source, target, topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于target, old=1, new=2 + // For target, old=1, new=2 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(2, scatterWidth[target].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -327,16 +327,16 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_source) { SchedulerHelper::CalculateAffectOfMigration( copyset, source, target, topoAdapter_, &scatterWidth); - // 对于target, old=1, new=3 + // For target, old=1, new=3 ASSERT_EQ(1, scatterWidth[target].first); ASSERT_EQ(3, scatterWidth[target].second); - // 对于replica1, old=2, new=3 + // For replica1, old=2, new=3 ASSERT_EQ(2, scatterWidth[1].first); ASSERT_EQ(3, scatterWidth[1].second); - // 对于replica2, old=3, new=3 + // For replica2, old=3, new=3 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(3, scatterWidth[2].second); - // 对于replica3, old=2, new=3 + // For replica3, old=2, new=3 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(3, scatterWidth[3].second); } @@ -365,13 +365,13 @@ TEST_F(TestSchedulerHelper, test_CalculateAffectOfMigration_no_target) { SchedulerHelper::CalculateAffectOfMigration( copyset, source, target, topoAdapter_, &scatterWidth); - // 对于source, old=2, new=1 + // For source, old=2, new=1 ASSERT_EQ(2, scatterWidth[source].first); ASSERT_EQ(1, scatterWidth[source].second); - // 对于replica2, old=3, new=2 + // For replica2, old=3, new=2 ASSERT_EQ(3, scatterWidth[2].first); ASSERT_EQ(2, scatterWidth[2].second); - // 对于replica3, old=2, new=2 + // For replica3, old=2, new=2 ASSERT_EQ(2, scatterWidth[3].first); ASSERT_EQ(2, scatterWidth[3].second); } diff --git a/test/mds/server/mds_test.cpp b/test/mds/server/mds_test.cpp index 236e526371..b8503317df 100644 --- a/test/mds/server/mds_test.cpp +++ b/test/mds/server/mds_test.cpp @@ -55,8 +55,8 @@ class MDSTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! 
+ * After fork, the child process should avoid using LOG() as much as possible, it may deadlock!!! */ ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", "http://localhost:10032", @@ -66,7 +66,7 @@ class MDSTest : public ::testing::Test { nullptr)); exit(0); } - // 一定时间内尝试init直到etcd完全起来 + // Keep trying init for a while until etcd is fully up auto client = std::make_shared(); EtcdConf conf = {kEtcdAddr, static_cast(strlen(kEtcdAddr)), 1000}; uint64_t now = ::curve::common::TimeUtility::GetTimeofDaySec(); @@ -102,7 +102,7 @@ class MDSTest : public ::testing::Test { }; TEST_F(MDSTest, common) { - // 加载配置 + // Load configuration std::string confPath = "./conf/mds.conf"; auto conf = std::make_shared(); conf->SetConfigPath(confPath); @@ -116,7 +116,7 @@ TEST_F(MDSTest, common) { mds.InitMdsOptions(conf); mds.StartDummy(); - // 从dummy server获取version和mds监听端口 + // Obtain the version and the mds listening port from the dummy server brpc::Channel httpChannel; brpc::Controller cntl; brpc::ChannelOptions options; @@ -124,12 +124,12 @@ TEST_F(MDSTest, common) { std::string dummyAddr = "127.0.0.1:" + std::to_string(kDummyPort); ASSERT_EQ(0, httpChannel.Init(dummyAddr.c_str(), &options)); - // 测试获取version + // Test getting the version cntl.http_request().uri() = dummyAddr + "/vars/curve_version"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); ASSERT_FALSE(cntl.Failed()); - // 测试获取mds监听端口 + // Test getting the mds listening port cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_config_mds_listen_addr"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -140,13 +140,13 @@ TEST_F(MDSTest, common) { auto pos = attachment.find(":"); ASSERT_NE(std::string::npos, pos); std::string jsonString = attachment.substr(pos + 2); - // 去除两端引号 + // Remove the quotes at both ends jsonString = jsonString.substr(1, jsonString.size() - 2); reader.parse(jsonString, value); std::string mdsAddr = value["conf_value"].asString(); ASSERT_EQ(kMdsAddr, mdsAddr); - // 获取leader状态,此时mds_status应为follower + // Obtain the leader status, at which point mds_status should be follower cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/mds_status"; httpChannel.CallMethod(NULL, &cntl, NULL, NULL, NULL); @@ -156,7 +156,7 @@ TEST_F(MDSTest, common) { mds.StartCompaginLeader(); - // 此时isLeader应为true + // At this point, isLeader should be true cntl.Reset(); cntl.http_request().uri() = dummyAddr + "/vars/is_leader"; ASSERT_FALSE(cntl.Failed()); @@ -164,7 +164,7 @@ TEST_F(MDSTest, common) { cntl.response_attachment().to_string().find("leader")); mds.Init(); - // 启动mds + // Start mds Thread mdsThread(&MDS::Run, &mds); // sleep 5s sleep(5); @@ -172,7 +172,7 @@ TEST_F(MDSTest, common) { // 1、init channel ASSERT_EQ(0, channel_.Init(kMdsAddr.c_str(), nullptr)); - // 2、测试hearbeat接口 + // 2. Test the heartbeat interface cntl.Reset(); heartbeat::ChunkServerHeartbeatRequest request1; heartbeat::ChunkServerHeartbeatResponse response1; @@ -193,7 +193,7 @@ TEST_F(MDSTest, common) { stub1.ChunkServerHeartbeat(&cntl, &request1, &response1, nullptr); ASSERT_FALSE(cntl.Failed()); - // 3、测试namespaceService接口 + // 3. Test the namespaceService interface cntl.Reset(); GetFileInfoRequest request2; GetFileInfoResponse response2; @@ -205,7 +205,7 @@ TEST_F(MDSTest, common) { stub2.GetFileInfo(&cntl, &request2, &response2, nullptr); ASSERT_FALSE(cntl.Failed()); - // 4、测试topology接口 + // 4. 
Testing the topology interface cntl.Reset(); topology::ListPhysicalPoolRequest request3; topology::ListPhysicalPoolResponse response3; topology::TopologyService_Stub stub3(&channel_); stub3.ListPhysicalPool(&cntl, &request3, &response3, nullptr); ASSERT_FALSE(cntl.Failed()); - // 5、停掉mds + // 5. Stop the MDS uint64_t startTime = curve::common::TimeUtility::GetTimeofDayMs(); mds.Stop(); mdsThread.join(); diff --git a/test/mds/topology/test_topology.cpp b/test/mds/topology/test_topology.cpp index c1e878deb4..4196e73c9e 100644 --- a/test/mds/topology/test_topology.cpp +++ b/test/mds/topology/test_topology.cpp @@ -1668,11 +1668,11 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(100, pool.GetDiskCapacity()); - // 只刷一次 + // Flush only once EXPECT_CALL(*storage_, UpdateChunkServer(_)) .WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -1708,11 +1708,11 @@ TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 + // Flush only once EXPECT_CALL(*storage_, UpdateChunkServer(_)) .WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } @@ -2716,11 +2716,11 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 + // Flush only once EXPECT_CALL(*storage_, UpdateCopySet(_)) .WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep to wait for the database flush sleep(5); topology_->Stop(); } diff --git a/test/mds/topology/test_topology_chunk_allocator.cpp b/test/mds/topology/test_topology_chunk_allocator.cpp index a1ea8aa942..e9c6201d76 100644 --- a/test/mds/topology/test_topology_chunk_allocator.cpp +++ b/test/mds/topology/test_topology_chunk_allocator.cpp @@ -573,7 +573,7 @@ TEST_F(TestTopologyChunkAllocator, } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, with 64 chunks allocated each time std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 2000; i++) { @@ -621,7 +621,7 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, with 64 chunks allocated each time std::vector copySetIds; for (int i = 0; i < 2000; i++) { copySetIds.push_back(i); @@ -719,7 +719,7 @@ TEST(TestAllocateChunkPolicy, ASSERT_TRUE(poolMap[1] < poolMap[2]); ASSERT_TRUE(poolMap[2] < poolMap[3]); ASSERT_TRUE(poolMap[3] < poolMap[4]); - // 5个池大概分布因该是0, 10000,20000,30000,40000 + // The approximate distribution of the 5 pools should be 0, 10000, 20000, 30000, 40000 LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl << "pool3 : " << poolMap[3] << std::endl @@ -743,7 +743,7 @@ TEST(TestAllocateChunkPolicy, poolMap[pid] -= 1; } - // 测试是否能逐渐拉平pool之间差距 + // Test whether the gap between pools can be gradually evened out LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl << "pool3 : " << poolMap[3] << std::endl << "pool4 : " << poolMap[4] << std::endl; } -// 测试能否随机到每个pool +// Test 
to see if random allocation to each pool is possible TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolRandom) { std::vector pools = {1, 2, 3, 4, 5}; diff --git a/test/mds/topology/test_topology_metric.cpp b/test/mds/topology/test_topology_metric.cpp index 2a38263784..bc1610921b 100644 --- a/test/mds/topology/test_topology_metric.cpp +++ b/test/mds/topology/test_topology_metric.cpp @@ -48,7 +48,7 @@ class TestTopologyMetric : public ::testing::Test { idGenerator_ = std::make_shared(); tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); - // 使用真实的topology + // Using real topology topology_ = std::make_shared(idGenerator_, tokenGenerator_, storage_); diff --git a/test/resources.list b/test/resources.list index 9be11dbb07..20b047da17 100644 --- a/test/resources.list +++ b/test/resources.list @@ -18,30 +18,30 @@ Used port list: # client - 9101: session service 测试 - 9102: applyindex service 测试 - 9103: snapshot service 测试 - 9104: client端其他测试 - 9105: client workflow测试mds占用 - 9106: client workflow测试chunkserver占用 - 9107: client workflow测试chunkserver占用 - 9108: client workflow测试chunkserver占用 - 9109: request scheduler测试占用 - 9110/9111/9112: TestLibcbdLibcurve测试占用 - 9115/9116/9117: TestLibcurveInterface测试占用 - - 9120: mds 接口测试 - 9121: mds 接口测试 - 9122: mds 接口测试 - 9123: mds 接口测试 - 9130: metric测试 - 9131: metric测试 - 9132: metric测试 - 9140: metric测试 - 9141: metric测试 - 9142: metric测试 - 9150/9151 ChunkserverUnstableTest - 19151/19110/19111/19112 curveClient测试 + 9101: session service testing + 9102: applyindex service testing + 9103: snapshot service testing + 9104: Other client testing + 9105: client workflow testing, MDS usage + 9106: client workflow testing, Chunkserver usage + 9107: client workflow testing, Chunkserver usage + 9108: client workflow testing, Chunkserver usage + 9109: request scheduler testing usage + 9110/9111/9112: TestLibcbdLibcurve testing usage + 9115/9116/9117: TestLibcurveInterface testing usage + + 9120: MDS interface testing + 9121: MDS interface testing + 9122: MDS interface testing + 9123: MDS interface testing + 9130: metric testing + 9131: metric testing + 9132: metric testing + 9140: metric testing + 9141: metric testing + 9142: metric testing + 9150/9151: ChunkserverUnstableTest + 19151/19110/19111/19112: curveClient testing client_test_unittest: 21000 diff --git a/test/snapshotcloneserver/test_clone_core.cpp b/test/snapshotcloneserver/test_clone_core.cpp index f57c2d15c0..35cc293f2f 100644 --- a/test/snapshotcloneserver/test_clone_core.cpp +++ b/test/snapshotcloneserver/test_clone_core.cpp @@ -86,7 +86,7 @@ class TestCloneCoreImpl : public ::testing::Test { } protected: - // 辅助mock函数 + // Auxiliary mock function void MockBuildFileInfoFromSnapshotSuccess( std::shared_ptr task); diff --git a/test/snapshotcloneserver/test_curvefs_client.cpp b/test/snapshotcloneserver/test_curvefs_client.cpp index b4e79b17b0..45c60ba480 100644 --- a/test/snapshotcloneserver/test_curvefs_client.cpp +++ b/test/snapshotcloneserver/test_curvefs_client.cpp @@ -40,7 +40,7 @@ class TestCurveFsClientImpl : public ::testing::Test { static void SetUpTestCase() { ClientConfigGenerator gentor(kClientConfigPath); - // 把超时时间和重试次数改小,已使得测试尽快完成 + // Reducing the timeout and retry times has enabled the testing to complete as soon as possible std::vector options = { {"mds.listen.addr=127.0.0.1:8888", "mds.registerToMDS=false", @@ -159,7 +159,7 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->GetFileInfo("file1", clientOption_.mdsRootUser, &fInfo); ASSERT_LT(ret, 0); - 
// client 对mds接口无限重试,这两个接口死循环,先注释掉 + // The client retries the mds interface infinitely, and these two interfaces loop endlessly. Please comment them out first // ret = client_->GetOrAllocateSegmentInfo( // true, 0, &fInfo, "user1", &segInfo); // ASSERT_LT(ret, 0); diff --git a/test/snapshotcloneserver/test_snapshot_core.cpp b/test/snapshotcloneserver/test_snapshot_core.cpp index d4c40963f1..d6d73ce131 100644 --- a/test/snapshotcloneserver/test_snapshot_core.cpp +++ b/test/snapshotcloneserver/test_snapshot_core.cpp @@ -989,7 +989,7 @@ TEST_F(TestSnapshotCoreImpl, std::vector snapInfos; SnapshotInfo info2(uuid2, user, fileName, desc2); info.SetSeqNum(seqNum); - info2.SetSeqNum(seqNum - 1); // 上一个快照 + info2.SetSeqNum(seqNum - 1); // Previous snapshot info2.SetStatus(Status::done); snapInfos.push_back(info); snapInfos.push_back(info2); @@ -2728,7 +2728,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .Times(2) .WillOnce(Invoke([task](const std::string &filename, @@ -2742,7 +2742,7 @@ TEST_F(TestSnapshotCoreImpl, TestHandleCreateSnapshotTaskCancelSuccess) { EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); @@ -2794,14 +2794,14 @@ TEST_F(TestSnapshotCoreImpl, Return(LIBCURVE_ERROR::OK))); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*metaStore_, CASSnapshot(_, _)) .WillOnce(Invoke([task](const UUID& uuid, CASFunc cas) { task->Cancel(); return kErrCodeSuccess; })); - // 进入cancel + // Enter cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -2896,7 +2896,7 @@ TEST_F(TestSnapshotCoreImpl, .WillRepeatedly(DoAll(SetArgPointee<1>(chunkInfo), Return(LIBCURVE_ERROR::OK))); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*dataStore_, PutChunkIndexData(_, _)) .WillOnce(Invoke([task](const ChunkIndexDataName &name, const ChunkIndexData &meta) { @@ -2905,7 +2905,7 @@ TEST_F(TestSnapshotCoreImpl, })); - // 进入cancel + // Enter cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Return(LIBCURVE_ERROR::OK)); @@ -3060,7 +3060,7 @@ TEST_F(TestSnapshotCoreImpl, .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Invoke([task](const std::string &filename, const std::string &user, @@ -3072,7 +3072,7 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .WillRepeatedly(Return(true)); @@ -3219,7 +3219,7 @@ TEST_F(TestSnapshotCoreImpl, .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .WillOnce(Invoke([task](const std::string &filename, const std::string &user, @@ -3231,7 +3231,7 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) 
.Times(4) .WillRepeatedly(Return(true)); @@ -3383,7 +3383,7 @@ TEST_F(TestSnapshotCoreImpl, .Times(4) .WillRepeatedly(Return(kErrCodeSuccess)); - // 此处捕获task,设置cancel + // Capture task here and set cancel EXPECT_CALL(*client_, DeleteSnapshot(fileName, user, seqNum)) .Times(2) .WillOnce(Invoke([task](const std::string &filename, @@ -3397,7 +3397,7 @@ TEST_F(TestSnapshotCoreImpl, EXPECT_CALL(*client_, CheckSnapShotStatus(_, _, _, _)) .WillRepeatedly(Return(-LIBCURVE_ERROR::NOTEXIST)); - // 进入cancel + // Enter cancel EXPECT_CALL(*dataStore_, ChunkDataExist(_)) .Times(4) .WillRepeatedly(Return(true)); diff --git a/test/snapshotcloneserver/test_snapshot_service_manager.cpp b/test/snapshotcloneserver/test_snapshot_service_manager.cpp index ba51d90f98..1c2a391b8e 100644 --- a/test/snapshotcloneserver/test_snapshot_service_manager.cpp +++ b/test/snapshotcloneserver/test_snapshot_service_manager.cpp @@ -428,7 +428,7 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -// 删除转cancel用例 +// Delete to cancel use case TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -1086,7 +1086,7 @@ TEST_F(TestSnapshotServiceManager, cond2.Signal(); })); - // 取消排队的快照会调一次 + // Unqueued snapshots will be called once EXPECT_CALL(*core_, HandleCancelUnSchduledSnapshotTask(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -1107,7 +1107,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); - // 再打一个快照,覆盖排队的情况 + // Take another snapshot to cover the queuing situation ret = manager_->CreateSnapshot( file, user, @@ -1116,7 +1116,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid2, uuidOut2); - // 先取消在排队的快照 + // Cancel queued snapshots first ret = manager_->CancelSnapshot(uuidOut2, user, file); diff --git a/test/tools/chunkserver_client_test.cpp b/test/tools/chunkserver_client_test.cpp index 9af94d01d3..7e79950233 100644 --- a/test/tools/chunkserver_client_test.cpp +++ b/test/tools/chunkserver_client_test.cpp @@ -61,17 +61,17 @@ TEST_F(ChunkServerClientTest, Init) { TEST_F(ChunkServerClientTest, GetRaftStatus) { std::vector statServices = fakemds.GetRaftStateService(); - // 正常情况 + // Normal situation butil::IOBuf iobuf; iobuf.append("test"); statServices[0]->SetBuf(iobuf); ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetRaftStatus(&iobuf)); - // 传入空指针 + // Incoming null pointer ASSERT_EQ(-1, client.GetRaftStatus(nullptr)); - // RPC失败的情况 + // The situation of RPC failure statServices[0]->SetFailed(true); ASSERT_EQ(-1, client.GetRaftStatus(&iobuf)); } @@ -85,11 +85,11 @@ TEST_F(ChunkServerClientTest, CheckChunkServerOnline) { std::unique_ptr fakeret( new FakeReturn(&cntl, static_cast(response.get()))); chunkServices[0]->SetGetChunkInfo(fakeret.get()); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(true, client.CheckChunkServerOnline()); - // RPC失败的情况 + // The situation of RPC failure cntl.SetFailed("fail for test"); ASSERT_EQ(false, client.CheckChunkServerOnline()); } @@ -105,16 +105,16 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) { request.set_allocated_peer(peer); request.set_queryhash(true); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); ASSERT_EQ(0, client.GetCopysetStatus(request, &response)); - // 返回码不ok的情况 + // The situation where the return code is not OK copysetServices[0]->SetStatus( 
COPYSET_OP_STATUS::COPYSET_OP_STATUS_COPYSET_NOTEXIST); ASSERT_EQ(-1, client.GetCopysetStatus(request, &response)); - // RPC失败的情况 + // The situation of RPC failure brpc::Controller cntl; std::unique_ptr fakeret(new FakeReturn(&cntl, nullptr)); copysetServices[0]->SetFakeReturn(fakeret.get()); @@ -132,17 +132,17 @@ TEST_F(ChunkServerClientTest, GetChunkHash) { new FakeReturn(&cntl, static_cast(response.get()))); chunkServices[0]->SetGetChunkHash(fakeret.get()); Chunk chunk(1, 100, 1001); - // 正常情况 + // Normal situation ASSERT_EQ(0, client.Init("127.0.0.1:9191")); std::string hash; ASSERT_EQ(0, client.GetChunkHash(chunk, &hash)); ASSERT_EQ("1234", hash); - // RPC失败的情况 + // The situation of RPC failure cntl.SetFailed("fail for test"); ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash)); - // 返回码不为ok + // The return code is not OK response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN); ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash)); } diff --git a/test/tools/config/data_check.conf b/test/tools/config/data_check.conf index 7380f75bd5..0f93452c72 100644 --- a/test/tools/config/data_check.conf +++ b/test/tools/config/data_check.conf @@ -15,131 +15,131 @@ # # -# mds一侧配置信息 +# MDS side configuration information # -# mds的地址信息 +# Address information of mds mds.listen.addr=127.0.0.1:9160 -# 初始化阶段向mds注册开关,默认为开 +# Register switch with mds during initialization phase, default to on mds.registerToMDS=true -# 与mds通信的超时时间 +# Time out for communication with mds mds.rpcTimeoutMS=1000 -# 与mds通信最大的超时时间, 指数退避的超时间不能超过这个值 +# The maximum timeout time for communication with MDS, and the timeout for exponential backoff cannot exceed this value mds.maxRPCTimeoutMS=2000 -# 在当前mds上连续重试次数超过该限制就切换 +# Switch if the number of consecutive retries on the current mds exceeds this limit mds.maxFailedTimesBeforeChangeMDS=5 -# 与MDS一侧保持一个lease时间内多少次续约 +# How many renewals are there within a lease period with MDS mds.refreshTimesPerLease=4 -# mds RPC接口每次重试之前需要先睡眠一段时间 +# The mds RPC interface requires a period of sleep before each retry mds.rpcRetryIntervalUS=100000 # -################# metacache配置信息 ################ +################# Metacache Configuration Information ################ # -# 获取leader的rpc超时时间 +# Obtain the rpc timeout of the leader metacache.getLeaderTimeOutMS=1000 -# 获取leader的backup request超时时间 +# Obtain the backup request timeout for the leader metacache.getLeaderBackupRequestMS=100 -# 获取leader的重试次数 +# Retrieve the number of retries for the leader metacache.getLeaderRetry=3 -# getleader接口每次重试之前需要先睡眠一段时间 +# The getleader interface needs to sleep for a period of time before each retry metacache.rpcRetryIntervalUS=100000 # -############### 调度层的配置信息 ############# +###############Configuration information of the scheduling layer############# # -# 调度层队列大小,每个文件对应一个队列 -# 调度队列的深度会影响client端整体吞吐,这个队列存放的是异步IO任务。。 +# Scheduling layer queue size, with one queue for each file +# The depth of the scheduling queue can affect the overall throughput of the client, as it stores asynchronous IO tasks.. schedule.queueCapacity=4096 -# 队列的执行线程数量 -# 执行线程所要做的事情就是将IO取出,然后发到网络就返回取下一个网络任务。一个任务从 -# 队列取出到发送完rpc请求大概在(20us-100us),20us是正常情况下不需要获取leader的时候 -# 如果在发送的时候需要获取leader,时间会在100us左右,一个线程的吞吐在10w-50w -# 性能已经满足需求 +# Number of Execution Threads for the Queue +# The task of execution threads is to retrieve IO and then send it over the network before moving on to the next network task. 
+# The time it takes for a task to be retrieved from the queue and the RPC request to be sent typically ranges from 20 microseconds (20us) to 100 microseconds (100us). +# The lower end of this range, 20us, is under normal conditions when leader acquisition is not required during transmission. If leader acquisition is necessary during transmission, the time may extend to around 100us. +# The throughput of a single thread ranges from 100,000 (10w) to 500,000 (50w) tasks per second. This performance level meets the requirements. schedule.threadpoolSize=2 -# 为隔离qemu侧线程引入的任务队列,因为qemu一侧只有一个IO线程 -# 当qemu一侧调用aio接口的时候直接将调用push到任务队列就返回, -# 这样libcurve不占用qemu的线程,不阻塞其异步调用 +# To isolate the task queue introduced by the QEMU side thread, as there is only one IO thread on the QEMU side +# When the QEMU side calls the AIO interface, it directly pushes the call to the task queue and returns, +# This way, libcurve does not occupy QEMU's threads and does not block its asynchronous calls isolation.taskQueueCapacity=500000 -# 任务队列线程池大小, 默认值为1个线程 +# Task queue thread pool size, default value is 1 thread isolation.taskThreadPoolSize=1 # -################ 与chunkserver通信相关配置 ############# +################ Configuration related to communication with chunkserver ############# # -# 读写接口失败的OP之间重试睡眠 +# Retrying sleep between OPs with failed read/write interfaces chunkserver.opRetryIntervalUS=50000 -# 失败的OP重试次数 +# Number of failed OP retries chunkserver.opMaxRetry=3 -# 与chunkserver通信的rpc超时时间 +# RPC timeout for communication with chunkserver chunkserver.rpcTimeoutMS=1000 -# 开启基于appliedindex的读,用于性能优化 +# Enable reading based on appliedindex for performance optimization chunkserver.enableAppliedIndexRead=1 -# 下发IO最大的分片KB +# Maximum sharding KB for issuing IO global.fileIOSplitMaxSizeKB=4 -# libcurve底层rpc调度允许最大的未返回rpc数量,每个文件的inflight RPC独立 +# libcurve allows for the maximum number of unreturned rpcs in the underlying rpc scheduling, with each file's inflight RPC being independent global.fileMaxInFlightRPCNum=2048 -# 重试请求之间睡眠最长时间 -# 因为当网络拥塞的时候或者chunkserver出现过载的时候,需要增加睡眠时间 -# 这个时间最大为maxRetrySleepIntervalUs +# Maximum sleep time between retry requests +# Because when the network is congested or the chunkserver is overloaded, it is necessary to increase sleep time +# The maximum time for this is maxRetrySleepIntervalUs chunkserver.maxRetrySleepIntervalUS=8000000 -# 重试请求的超时rpc时间最大值,超时时间会遵循指数退避策略 -# 因为当网络拥塞的时候出现超时,需要增加RPC超时时间 -# 这个时间最大为maxTimeoutMS +# The maximum timeout rpc time for retry requests, which follows an exponential backoff strategy +# Because timeout occurs when the network is congested, it is necessary to increase the RPC timeout time +# The maximum time for this is maxTimeoutMS chunkserver.maxRPCTimeoutMS=8000 -# 同一个chunkserver连续超时上限次数 -# 如果超过这个值,就会进行健康检查,健康检查失败后,会标记为unstable +# Maximum number of consecutive timeouts for the same chunkserver +# If this value is exceeded, a health check will be conducted, and if the health check fails, it will be marked as unstable chunkserver.maxStableTimeoutTimes=64 -# chunkserver上rpc连续超时后,健康检查请求的超时间 +# The timeout of health check requests after consecutive RPC timeouts on chunkserver chunkserver.checkHealthTimeoutMs=100 -# 同一个server上unstable的chunkserver数量超过这个值之后 -# 所有的chunkserver都会标记为unstable +# After the number of unstable chunkservers on the same server exceeds this value +# All chunkservers will be marked as unstable chunkserver.serverStableThreshold=3 -# 当一个rpc重试超过次数maxRetryTimesBeforeConsiderSuspend的时候 -# 记为悬挂IO,metric会报警 +# When an RPC retry exceeds 
maxRetryTimesBeforeConsiderSuspend +# Record as suspended IO, metric will alarm chunkserver.maxRetryTimesBeforeConsiderSuspend=20 chunkserver.opRetryIntervalUS=100000 metacache.getLeaderBackupRequestMS=100 # -################# log相关配置 ############### +################# Log related configuration ############### # -# log等级 INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3 global.logLevel=0 -# 设置log的路径 +# Set the path of the log global.logPath=./runlog/ -# 单元测试情况下 +# In the case of unit testing # logpath=./runlog/ # -############### metric 配置信息 ############# +############### metric configuration information ############# # global.metricDummyServerStartPort=9000 # -# session map文件,存储打开文件的filename到path的映射 +# session map file, storing the mapping from filename to path of the opened file # global.sessionMapPath=./session_map.json diff --git a/test/tools/copyset_check_core_test.cpp b/test/tools/copyset_check_core_test.cpp index 9ef6de55ce..63bdc55417 100644 --- a/test/tools/copyset_check_core_test.cpp +++ b/test/tools/copyset_check_core_test.cpp @@ -176,7 +176,7 @@ TEST_F(CopysetCheckCoreTest, Init) { ASSERT_EQ(-1, copysetCheck.Init("127.0.0.1:6666")); } -// CheckOneCopyset正常情况 +//CheckOneCopyset normal situation TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { std::vector csLocs; butil::IOBuf followerBuf; @@ -215,7 +215,7 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetNormal) { ASSERT_EQ(iobuf.to_string(), copysetCheck.GetCopysetDetail()); } -// CheckOneCopyset异常情况 +//CheckOneCopyset Exception TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) { std::vector csLocs; butil::IOBuf followerBuf; @@ -231,14 +231,14 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) { copyset.set_logicalpoolid(1); copyset.set_copysetid(100); - // 1、GetChunkServerListInCopySet失败 + //1. GetChunkServerListInCopySet failed EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); CopysetCheckCore copysetCheck1(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kOtherErr, copysetCheck1.CheckOneCopyset(1, 100)); - // 2、copyset不健康 + //2. Copyset is unhealthy GetIoBufForTest(&followerBuf, "4294967396", "FOLLOWER", true); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) @@ -254,7 +254,7 @@ TEST_F(CopysetCheckCoreTest, CheckOneCopysetError) { CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kOtherErr, copysetCheck2.CheckOneCopyset(1, 100)); - // 3、有peer不在线,一个是chunkserver不在线,一个是copyset不在线 + //3. 
Some peers are offline: one because its chunkserver is offline, the other because its copyset is offline GetIoBufForTest(&followerBuf, "4294967397"); EXPECT_CALL(*mdsClient_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<2>(csLocs), Return(0))); EXPECT_CALL(*csClient_, Init(_)) .Times(1) .WillOnce(Return(0)); EXPECT_CALL(*csClient_, GetRaftStatus(_)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(followerBuf), Return(0))); CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(CheckResult::kOtherErr, copysetCheck3.CheckOneCopyset(1, 100)); } -// CheckCopysetsOnChunkserver正常情况 +// CheckCopysetsOnChunkserver normal case TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerHealthy) { ChunkServerIdType csId = 1; std::string csAddr = "127.0.0.1:9191"; std::vector csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; csServerInfos.emplace_back(csServerInfo); } - // mds返回Chunkserver retired的情况,直接返回0 + // If MDS reports the chunkserver as retired, return 0 directly GetCsInfoForTest(&csInfo, csId, false, "LEADER"); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); expectedRes[kTotal].insert(gId); - // 通过id查询,有一个copyset配置组中没有当前chunkserver,应忽略 + // When querying by ID, one copyset's configuration group does not contain the current chunkserver and should be ignored GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(csInfo), ASSERT_DOUBLE_EQ(0, copysetCheck2.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); - // 通过地址查询 + // Query by address EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(csInfo), ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); } -// CheckCopysetsOnChunkserver异常情况 +// CheckCopysetsOnChunkserver error cases TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ChunkServerIdType csId = 1; std::string csAddr = "127.0.0.1:9191"; GetIoBufForTest(&followerBuf2, gId, "FOLLOWER", true); std::map> expectedRes; - // 1、GetChunkServerInfo失败的情况 + // 1. GetChunkServerInfo fails CopysetCheckCore copysetCheck1(mdsClient_, csClient_); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) ASSERT_DOUBLE_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 2、chunkserver发送RPC失败的情况 + // 2. Sending RPC to the chunkserver fails std::vector csServerInfos; for (int i = 1; i <= 3; ++i) { CopySetServerInfo csServerInfo; ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); expectedRes.clear(); - // 3、获取chunkserver上的copyset失败的情况 + // 3. Failed to obtain the copysets on the chunkserver GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) ASSERT_EQ(expectedExcepCs, copysetCheck3.GetServiceExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // 4、获取copyset对应的chunkserver列表失败的情况 + // 4. 
Failed to obtain the chunkserver list corresponding to the copyset GetCsInfoForTest(&csInfo, csId); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(csInfo), @@ -480,7 +480,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerError) { ASSERT_EQ(expectedExcepCs, copysetCheck4.GetServiceExceptionChunkServer()); ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes()); - // 检查copyset是否在配置组中时出错 + // Error when checking whether the copyset is in the configuration group EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csAddr, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(csInfo), ASSERT_EQ(-1, copysetCheck5.CheckCopysetsOnChunkServer(csAddr)); } -// chunkserver上copyset不健康的情况 -// 检查单个server和集群都是复用的CheckCopysetsOnChunkserver -// 所以CheckCopysetsOnChunkserver要测每个不健康的情况,其他的只要测健康和不健康还有不在线的情况就好 -// 具体什么原因不健康不用关心 +// Unhealthy copysets on a chunkserver +// Checking a single server and checking the whole cluster both reuse CheckCopysetsOnChunkserver, +// so CheckCopysetsOnChunkserver has to cover every unhealthy case, while the others only need to cover the healthy, unhealthy and offline cases +// The specific reason a copyset is unhealthy does not matter here TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { ChunkServerIdType csId = 1; std::string csAddr1 = "127.0.0.1:9194"; uint64_t gId = 4294967396; std::string groupId; - // 1、首先加入9个健康的copyset + // 1. First, add 9 healthy copysets for (int i = 0; i < 9; ++i) { groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, expectedRes[kTotal].emplace(groupId); os << temp << "\r\n"; } - // 2、加入没有leader的copyset + // 2. Add a copyset without a leader groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false, false, false); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 3、加入正在安装快照的copyset + // 3. Add a copyset that is installing a snapshot groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "LEADER", false, true, false, false, false, false); expectedRes[kTotal].emplace(groupId); expectedRes[kInstallingSnapshot].emplace(groupId); os << temp << "\r\n"; - // 4、加入peer不足的copyset + // 4. Add a copyset with insufficient peers groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "LEADER", false, false, true, false, false, false); expectedRes[kTotal].emplace(groupId); expectedRes[kPeersNoSufficient].emplace(groupId); os << temp << "\r\n"; - // 5、加入日志差距大的copset + // 5. Add a copyset with a large log gap groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, true, false, false); expectedRes[kTotal].emplace(groupId); expectedRes[kLogIndexGapTooBig].emplace(groupId); os << temp << "\r\n"; - // 6、加入无法解析的copyset,这种情况不会发生,发生了表明程序有bug - // 打印错误信息,但不会加入到unhealthy + // 6. Add a copyset that cannot be parsed. This should never happen; if it does, it indicates a bug in the program. + // An error message is printed, but the copyset is not counted as unhealthy groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, true, false); expectedRes[kTotal].emplace(groupId); os << temp << "\r\n"; - // 7.1、加入少数peer不在线的copyset + // 7.1. 
Add a few copysets where peers are not online groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "LEADER", false, false, false, false, false, true); @@ -568,7 +568,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { expectedRes[kMinorityPeerNotOnline].emplace(groupId); os << temp << "\r\n"; - // 7.2、加入大多数peer不在线的copyset + //7.2. Add copysets where most peers are not online groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "FOLLOWER", true, false, false, false, false, false, true); @@ -576,35 +576,35 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { expectedRes[kMajorityPeerNotOnline].emplace(groupId); os << temp << "\r\n"; - // 8、加入CANDIDATE状态的copyset + //8. Add a copyset in the CANDIDATE state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "CANDIDATE"); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 9、加入TRANSFERRING状态的copyset + //9. Add a copyset in the TRANSFERRING state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "TRANSFERRING"); expectedRes[kTotal].emplace(groupId); expectedRes[kNoLeader].emplace(groupId); os << temp << "\r\n"; - // 10、加入ERROR状态的copyset + //10. Add a copyset in the ERROR state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "ERROR"); expectedRes[kTotal].emplace(groupId); expectedRes["state ERROR"].emplace(groupId); os << temp << "\r\n"; - // 11、加入SHUTDOWN状态的copyset + //11. Add a copyset in SHUTDOWN state groupId = std::to_string(gId++); GetIoBufForTest(&temp, groupId, "SHUTDOWN"); expectedRes[kTotal].emplace(groupId); expectedRes["state SHUTDOWN"].emplace(groupId); os << temp; - // 设置mock对象的返回,8个正常iobuf里面,设置一个的peer不在线,因此unhealthy++ + //Set the return of mock objects. 
Among the 8 normal iobufs, one peer is set to be offline, resulting in unhealthy++ os.move_to(iobuf); EXPECT_CALL(*mdsClient_, GetChunkServerInfo(csId, _)) .Times(1) @@ -632,7 +632,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { .WillRepeatedly(DoAll(SetArgPointee<2>(csServerInfos), Return(0))); - // 检查结果 + //Inspection results std::set expectedExcepCs = {csAddr1, csAddr2}; CopysetCheckCore copysetCheck(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck.CheckCopysetsOnChunkServer(csId)); @@ -641,7 +641,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnChunkServerUnhealthy) { ASSERT_EQ(expectedExcepCs, copysetCheck.GetServiceExceptionChunkServer()); } -// CheckCopysetsOnServer正常情况 +//CheckCopysetsOnServer normal condition TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ServerIdType serverId = 1; std::string serverIp = "127.0.0.1"; @@ -659,7 +659,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { GetIoBufForTest(&iobuf, groupId, "LEADER", false, false, false, false, false, false); - // 通过id查询 + //Query by ID EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(chunkservers), @@ -678,7 +678,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 通过ip查询 + //Query through IP EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverIp, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(chunkservers), @@ -690,7 +690,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { .Times(3) .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); - // 通过ip查询 + //Query through IP CopysetCheckCore copysetCheck2(mdsClient_, csClient_); ASSERT_EQ(0, copysetCheck2.CheckCopysetsOnServer(serverIp, &unhealthyCs)); ASSERT_EQ(0, unhealthyCs.size()); @@ -698,7 +698,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerNormal) { ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); } -// CheckCopysetsOnServer异常情况 +//CheckCopysetsOnServer Exceptio TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { ServerIdType serverId = 1; butil::IOBuf iobuf; @@ -721,7 +721,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { gIds.emplace(std::to_string(gId)); } - // 1、ListChunkServersOnServer失败的情况 + //1. Situation of ListChunkServersOnServer failure EXPECT_CALL(*mdsClient_, ListChunkServersOnServer(serverId, _)) .Times(1) .WillOnce(Return(-1)); @@ -730,7 +730,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 3、一个chunkserver访问失败,一个chunkserver不健康的情况 + //3. A chunkserver access failure and an unhealthy chunkserver situation GetIoBufForTest(&iobuf, groupId, "LEADER", false, true); expectedRes[kTotal] = gIds; expectedRes[kTotal].emplace(groupId); @@ -768,7 +768,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsOnServerError) { ASSERT_EQ(expectedExcepCs, copysetCheck2.GetServiceExceptionChunkServer()); } -// CheckCopysetsInCluster正常情况 +//CheckCopysetsInCluster normal situation TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterNormal) { butil::IOBuf iobuf; GetIoBufForTest(&iobuf, "4294967396", "LEADER"); @@ -826,7 +826,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { GetCsInfoForTest(&chunkserver, 1); std::vector chunkservers = {chunkserver}; - // 1、ListServersInCluster失败 + //1. 
ListServersInCluster failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -835,7 +835,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { ASSERT_EQ(0, copysetCheck1.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck1.GetCopysetsRes()); - // 2、CheckCopysetsOnServer返回不为0 + //2. CheckCopysetsOnServer returned a non zero value EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(servers), @@ -854,7 +854,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { expectedRes[kTotal] = {}; ASSERT_EQ(expectedRes, copysetCheck2.GetCopysetsRes()); - // 3、GetMetric失败 + //3. GetMetric failed expectedRes[kTotal] = {"4294967396"}; EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(2) @@ -884,18 +884,18 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { .Times(2) .WillRepeatedly(DoAll(SetArgPointee<0>(copysetsInMds), Return(0))); - // 获取operator失败 + //Failed to obtain operator CopysetCheckCore copysetCheck3(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck3.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck3.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck3.GetCopysetsRes()); - // operator数量大于0 + //The number of operators is greater than 0 CopysetCheckCore copysetCheck4(mdsClient_, csClient_); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); ASSERT_EQ(expectedRes, copysetCheck4.GetCopysetsRes()); - // 4、比较chunkserver跟mds的copyset失败 + //4. Failed to compare the copyset between chunkserver and mds EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(3) .WillRepeatedly(DoAll(SetArgPointee<0>(servers), @@ -911,13 +911,13 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { .Times(9) .WillRepeatedly(DoAll(SetArgPointee<0>(iobuf), Return(0))); - // 从获取copyset失败 + //Failed to obtain copyset from EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) .WillRepeatedly(Return(-1)); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); - // copyset数量不一致 + //Inconsistent number of copysets copysetsInMds.clear(); copyset.set_logicalpoolid(1); copyset.set_copysetid(101); @@ -930,7 +930,7 @@ TEST_F(CopysetCheckCoreTest, CheckCopysetsInClusterError) { Return(0))); ASSERT_EQ(-1, copysetCheck4.CheckCopysetsInCluster()); ASSERT_EQ(0, copysetCheck4.GetCopysetStatistics().unhealthyRatio); - // copyset数量一致,但是内容不一致 + //The number of copysets is consistent, but the content is inconsistent copysetsInMds.pop_back(); EXPECT_CALL(*mdsClient_, GetCopySetsInCluster(_, _)) .Times(1) @@ -944,18 +944,18 @@ TEST_F(CopysetCheckCoreTest, CheckOperator) { CopysetCheckCore copysetCheck(mdsClient_, csClient_); std::string opName = "change_peer"; uint64_t checkTime = 3; - // 1、获取metric失败 + //1. Failed to obtain metric EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, copysetCheck.CheckOperator(opName, checkTime)); - // 2、operator数量不为0 + //2. The number of operators is not 0 EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(10), Return(0))); ASSERT_EQ(10, copysetCheck.CheckOperator(opName, checkTime)); - // 3、operator数量为0 + //3. 
The number of operators is 0 EXPECT_CALL(*mdsClient_, GetMetric(_, _)) .WillRepeatedly(DoAll(SetArgPointee<1>(0), Return(0))); diff --git a/test/tools/copyset_check_test.cpp b/test/tools/copyset_check_test.cpp index 01c7e3f4c2..7efa9c2cd4 100644 --- a/test/tools/copyset_check_test.cpp +++ b/test/tools/copyset_check_test.cpp @@ -147,7 +147,7 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) { {"127.0.0.1:9091", "127.0.0.1:9092", "127.0.0.1:9093"}; std::string copysetDetail = iobuf.to_string(); - // Init失败的情况 + // The situation of Init failure EXPECT_CALL(*core_, Init(_)) .Times(1) .WillOnce(Return(-1)); @@ -156,16 +156,16 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) { EXPECT_CALL(*core_, Init(_)) .Times(1) .WillOnce(Return(0)); - // 不支持的命令 + // Unsupported command ASSERT_EQ(-1, copysetCheck.RunCommand("check-nothings")); copysetCheck.PrintHelp("check-nothins"); - // 没有指定逻辑池和copyset的话返回失败 + // If no logical pool and copyset are specified, a failure is returne ASSERT_EQ(-1, copysetCheck.RunCommand("check-copyset")); FLAGS_logicalPoolId = 1; FLAGS_copysetId = 100; copysetCheck.PrintHelp("check-copyset"); - // 健康的情况 + // Healthy situation EXPECT_CALL(*core_, CheckOneCopyset(_, _)) .Times(1) .WillOnce(Return(CheckResult::kHealthy)); @@ -180,7 +180,7 @@ TEST_F(CopysetCheckTest, CheckOneCopyset) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-copyset")); - // copyset不健康的情况 + // The unhealthy situation of copyset EXPECT_CALL(*core_, CheckOneCopyset(_, _)) .Times(1) .WillOnce(Return(CheckResult::kLogIndexGapTooBig)); @@ -202,12 +202,12 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { EXPECT_CALL(*core_, Init(_)) .Times(1) .WillOnce(Return(0)); - // 没有指定chunkserver的话报错 + // Error reported if chunkserver is not specified ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); copysetCheck.PrintHelp("check-chunkserver"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_chunkserverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverId)) .Times(1) @@ -225,11 +225,11 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // id和地址同时指定,报错 + // Error reported when both ID and address are specified simultaneously FLAGS_chunkserverAddr = "127.0.0.1:8200"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-chunkserver")); FLAGS_chunkserverId = 0; - // 通过地址查询 + // Search through address EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(0)); @@ -247,7 +247,7 @@ TEST_F(CopysetCheckTest, testCheckChunkServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-chunkserver")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnChunkServer(FLAGS_chunkserverAddr)) .Times(1) .WillOnce(Return(-1)); @@ -275,12 +275,12 @@ TEST_F(CopysetCheckTest, testCheckServer) { .Times(1) .WillOnce(Return(0)); - // 没有指定server的话报错 + // If no server is specified, an error will be reported ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); copysetCheck.PrintHelp("check-server"); - // 健康的情况 - // 通过id查询 + // Healthy situation + // Query by ID FLAGS_serverId = 1; EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverId, _)) .Times(1) @@ -299,11 +299,11 @@ TEST_F(CopysetCheckTest, testCheckServer) { .Times(1) .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // id和ip同时指定,报错 + // Error reported when both ID and IP are specified simultaneously 
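Nearly every expectation in these checks relies on the same gMock idiom: DoAll(SetArgPointee<N>(value), Return(code)) fills an output pointer argument and returns a status code in a single action, which is how the mocked ListChunkServersOnServer, CheckCopysetsOnServer and GetMetric calls hand back both data and a result. A minimal, self-contained sketch of the idiom follows; the Lister/MockLister interface is hypothetical and exists only to illustrate the pattern.

#include <cstdint>
#include <string>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>

using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

// Hypothetical interface, used only to demonstrate the stubbing idiom.
class Lister {
 public:
    virtual ~Lister() = default;
    // Fills *out on success and returns 0; returns -1 on error.
    virtual int ListChunkServers(uint32_t serverId,
                                 std::vector<std::string>* out) = 0;
};

class MockLister : public Lister {
 public:
    MOCK_METHOD2(ListChunkServers,
                 int(uint32_t, std::vector<std::string>*));
};

TEST(SetArgPointeeSketch, FillsOutputAndReturnsStatus) {
    MockLister mock;
    std::vector<std::string> expected = {"127.0.0.1:8200", "127.0.0.1:8201"};
    // SetArgPointee<1> writes through the second argument (the out pointer),
    // Return(0) supplies the call's return value.
    EXPECT_CALL(mock, ListChunkServers(1, _))
        .Times(1)
        .WillOnce(DoAll(SetArgPointee<1>(expected), Return(0)));

    std::vector<std::string> actual;
    ASSERT_EQ(0, mock.ListChunkServers(1, &actual));
    ASSERT_EQ(expected, actual);
}

int main(int argc, char** argv) {
    ::testing::InitGoogleMock(&argc, argv);
    return RUN_ALL_TESTS();
}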
FLAGS_serverIp = "127.0.0.1"; ASSERT_EQ(-1, copysetCheck.RunCommand("check-server")); FLAGS_serverId = 0; - // 通过ip查询 + // Query through IP EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(chunkservers), @@ -322,7 +322,7 @@ TEST_F(CopysetCheckTest, testCheckServer) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand("check-server")); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsOnServer(FLAGS_serverIp, _)) .Times(1) .WillOnce(Return(-1)); @@ -348,7 +348,7 @@ TEST_F(CopysetCheckTest, testCheckCluster) { .Times(1) .WillOnce(Return(0)); - // 健康的情况 + // Healthy situation EXPECT_CALL(*core_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); @@ -366,7 +366,7 @@ TEST_F(CopysetCheckTest, testCheckCluster) { .WillOnce(ReturnRef(emptySet)); ASSERT_EQ(0, copysetCheck.RunCommand(kCopysetsStatusCmd)); - // 不健康的情况 + // Unhealthy situation EXPECT_CALL(*core_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(-1)); @@ -392,10 +392,10 @@ TEST_F(CopysetCheckTest, testCheckOperator) { .Times(1) .WillOnce(Return(0)); - // 1、不支持的operator + // 1. Unsupported operator FLAGS_opName = "no_operator"; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、transfer leader的operator和total的 + // 2. The operator and total of the transfer leader EXPECT_CALL(*core_, CheckOperator(_, FLAGS_leaderOpInterval)) .Times(2) .WillOnce(Return(0)) @@ -404,7 +404,7 @@ TEST_F(CopysetCheckTest, testCheckOperator) { ASSERT_EQ(0, copysetCheck.RunCommand(kCheckOperatorCmd)); FLAGS_opName = kTotalOpName; ASSERT_EQ(-1, copysetCheck.RunCommand(kCheckOperatorCmd)); - // 2、其他operator + // 2. Other operators EXPECT_CALL(*core_, CheckOperator(_, FLAGS_opIntervalExceptLeader)) .Times(3) .WillOnce(Return(10)) diff --git a/test/tools/curve_cli_test.cpp b/test/tools/curve_cli_test.cpp index 133d9de42d..7deba55e2f 100644 --- a/test/tools/curve_cli_test.cpp +++ b/test/tools/curve_cli_test.cpp @@ -113,20 +113,20 @@ TEST_F(CurveCliTest, RemovePeer) { curveCli.PrintHelp("remove-peer"); curveCli.PrintHelp("test"); curveCli.RunCommand("test"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("remove-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_conf = conf; FLAGS_peer = "1234"; - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -148,7 +148,7 @@ TEST_F(CurveCliTest, RemovePeer) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("remove-peer")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce( Invoke([](RpcController *controller, @@ -210,21 +210,21 @@ TEST_F(CurveCliTest, RemovePeer) { TEST_F(CurveCliTest, TransferLeader) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("transfer-leader"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // conf为空 + // conf is empty FLAGS_peer = peer; FLAGS_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析conf失败 + // Failed to parse conf FLAGS_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 解析peer失败 + // Parsing peer failed 
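The parse failures exercised here come from the peer and conf string format: peers follow the braft-style ip:port:index layout (for example 127.0.0.1:8200:0, as in the new_conf strings used elsewhere in this file), so a bare "1234" cannot be parsed. A rough sketch of that kind of shape check is below; LooksLikePeer is a hypothetical helper written only for illustration, not the parser the tool actually uses.

#include <cstdio>
#include <string>

// Hypothetical illustration only: a crude check that a peer string has the
// ip:port:index shape used by these tests (e.g. "127.0.0.1:8200:0").
static bool LooksLikePeer(const std::string& s) {
    unsigned a, b, c, d, port, index;
    char tail;
    // sscanf returns the number of converted fields; the trailing %c would
    // catch junk after the index, so exactly 6 conversions means a clean match.
    int n = std::sscanf(s.c_str(), "%u.%u.%u.%u:%u:%u%c",
                        &a, &b, &c, &d, &port, &index, &tail);
    return n == 6 && a < 256 && b < 256 && c < 256 && d < 256 && port < 65536;
}

int main() {
    std::printf("%d\n", LooksLikePeer("127.0.0.1:8200:0"));  // 1: accepted
    std::printf("%d\n", LooksLikePeer("1234"));              // 0: rejected, like FLAGS_peer = "1234"
    return 0;
}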
FLAGS_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("transfer-leader")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; curve::common::Peer* targetPeer = new curve::common::Peer; targetPeer->set_address(peer); @@ -239,7 +239,7 @@ TEST_F(CurveCliTest, TransferLeader) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(0, curveCli.RunCommand("transfer-leader")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, GetLeader(_, _, _, _)) .WillOnce( Invoke([](RpcController *controller, @@ -257,28 +257,28 @@ TEST_F(CurveCliTest, TransferLeader) { TEST_F(CurveCliTest, ResetPeer) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("reset-peer"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf为空 + // newConf is empty FLAGS_peer = peer; FLAGS_new_conf = ""; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析newConf失败 + // Failed to parse newConf FLAGS_new_conf = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 解析peer失败 + // Parsing peer failed FLAGS_new_conf = conf; FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf有三个副本 + // newConf has three copies FLAGS_peer = peer; FLAGS_new_conf = "127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // newConf不包含peer + // newConf does not contain peer FLAGS_new_conf = "127.0.0.1:8201:0"; ASSERT_EQ(-1, curveCli.RunCommand("reset-peer")); - // 执行变更成功 + // Successfully executed changes FLAGS_new_conf = conf; EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) .WillOnce(Invoke([](RpcController *controller, @@ -288,7 +288,7 @@ TEST_F(CurveCliTest, ResetPeer) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("reset-peer")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, ResetPeer(_, _, _, _)) .WillOnce(Invoke([](RpcController *controller, const ResetPeerRequest2 *request, @@ -305,13 +305,13 @@ TEST_F(CurveCliTest, ResetPeer) { TEST_F(CurveCliTest, DoSnapshot) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot"); - // peer为空 + // peer is empty FLAGS_peer = ""; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 解析peer失败 + // Parsing peer failed FLAGS_peer = "1234"; ASSERT_EQ(-1, curveCli.RunCommand("do-snapshot")); - // 执行变更成功 + // Successfully executed changes FLAGS_peer = peer; EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) .WillOnce(Invoke([](RpcController *controller, @@ -321,7 +321,7 @@ TEST_F(CurveCliTest, DoSnapshot) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("do-snapshot")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, Snapshot(_, _, _, _)) .WillOnce(Invoke([](RpcController *controller, const SnapshotRequest2 *request, @@ -338,7 +338,7 @@ TEST_F(CurveCliTest, DoSnapshot) { TEST_F(CurveCliTest, DoSnapshotAll) { curve::tool::CurveCli curveCli(mdsClient_); curveCli.PrintHelp("do-snapshot-all"); - // 执行变更成功 + // Successfully executed changes std::vector chunkservers; ChunkServerInfo csInfo; csInfo.set_hostip("127.0.0.1"); @@ -361,7 +361,7 @@ TEST_F(CurveCliTest, DoSnapshotAll) { brpc::ClosureGuard doneGuard(done); })); ASSERT_EQ(0, curveCli.RunCommand("do-snapshot-all")); - // 执行变更失败 + // Failed to execute changes EXPECT_CALL(*mockCliService, SnapshotAll(_, _, _, _)) .Times(1) .WillOnce(Invoke([](RpcController *controller, diff --git a/test/tools/curve_meta_tool_test.cpp 
b/test/tools/curve_meta_tool_test.cpp index 1d493c56f8..6d1b694f1f 100644 --- a/test/tools/curve_meta_tool_test.cpp +++ b/test/tools/curve_meta_tool_test.cpp @@ -65,7 +65,7 @@ TEST_F(CurveMetaToolTest, SupportCommand) { TEST_F(CurveMetaToolTest, PrintChunkMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(6) .WillOnce(Return(-1)) @@ -74,21 +74,21 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { .Times(5) .WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 3、解析失败 + // 3. Parsing failed char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("chunk-meta")); - // 4、普通chunk + // 4. Ordinary chunk ChunkFileMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; @@ -99,7 +99,7 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), Return(PAGE_SIZE))); ASSERT_EQ(0, curveMetaTool.RunCommand("chunk-meta")); - // 5、克隆chunk + // 5. Clone chunk metaPage.location = "test@s3"; uint32_t size = CHUNK_SIZE / PAGE_SIZE; auto bitmap = std::make_shared(size); @@ -116,7 +116,7 @@ TEST_F(CurveMetaToolTest, PrintChunkMeta) { TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { CurveMetaTool curveMetaTool(localFs_); - // 1、文件不存在 + // 1. The file does not exist EXPECT_CALL(*localFs_, Open(_, _)) .Times(5) .WillOnce(Return(-1)) @@ -125,21 +125,21 @@ TEST_F(CurveMetaToolTest, PrintSnapshotMeta) { .Times(4) .WillRepeatedly(Return(-1)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 2、读取meta page失败 + // 2. Failed to read meta page EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(2) .WillOnce(Return(-1)) .WillOnce(Return(10)); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 3、解析失败 + // 3. Parsing faile char buf[PAGE_SIZE] = {0}; EXPECT_CALL(*localFs_, Read(_, _, 0, PAGE_SIZE)) .Times(1) .WillOnce(DoAll(SetArrayArgument<1>(buf, buf + PAGE_SIZE), Return(PAGE_SIZE))); ASSERT_EQ(-1, curveMetaTool.RunCommand("snapshot-meta")); - // 4、成功chunk + // 4. Successful Chunk SnapshotMetaPage metaPage; metaPage.version = 1; metaPage.sn = 1; diff --git a/test/tools/data_consistency_check_test.cpp b/test/tools/data_consistency_check_test.cpp index 15cd238004..e5d4ee97c1 100644 --- a/test/tools/data_consistency_check_test.cpp +++ b/test/tools/data_consistency_check_test.cpp @@ -110,7 +110,7 @@ TEST_F(ConsistencyCheckTest, Consistency) { CopysetStatusResponse response; GetCopysetStatusForTest(&response); - // 设置期望 + // Set expectations EXPECT_CALL(*nameSpaceTool_, Init(_)) .Times(2) .WillRepeatedly(Return(0)); @@ -133,19 +133,19 @@ TEST_F(ConsistencyCheckTest, Consistency) { .Times(30) .WillRepeatedly(DoAll(SetArgPointee<1>("1111"), Return(0))); - // 1、检查hash + // 1. Check hash FLAGS_check_hash = true; curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); cfc1.PrintHelp("check-consistency"); cfc1.PrintHelp("check-nothing"); ASSERT_EQ(0, cfc1.RunCommand("check-consistency")); - // 2、检查applyIndex + // 2. 
Check the applyIndex FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(0, cfc2.RunCommand("check-consistency")); ASSERT_EQ(-1, cfc2.RunCommand("check-nothing")); - // mds返回副本为空的情况 + // The case where MDS returns an empty replica list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<1>(segments), @@ -180,7 +180,7 @@ TEST_F(ConsistencyCheckTest, NotConsistency) { CopysetStatusResponse response3; GetCopysetStatusForTest(&response3, 2222); - // 设置期望 + // Set expectations EXPECT_CALL(*nameSpaceTool_, Init(_)) .Times(3) .WillRepeatedly(Return(0)); @@ -193,7 +193,7 @@ TEST_F(ConsistencyCheckTest, NotConsistency) { .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), Return(0))); - // 1、检查hash,apply index一致,hash不一致 + // 1. Check hash: the apply indexes are consistent but the hashes are inconsistent FLAGS_check_hash = true; EXPECT_CALL(*csClient_, Init(_)) .Times(5) @@ -211,7 +211,7 @@ TEST_F(ConsistencyCheckTest, NotConsistency) { curve::tool::ConsistencyCheck cfc1(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc1.RunCommand("check-consistency")); - // 2、检查hash的时候apply index不一致 + // 2. When checking hash, the apply indexes are inconsistent EXPECT_CALL(*csClient_, Init(_)) .Times(2) .WillRepeatedly(Return(0)); @@ -224,7 +224,7 @@ TEST_F(ConsistencyCheckTest, NotConsistency) { curve::tool::ConsistencyCheck cfc2(nameSpaceTool_, csClient_); ASSERT_EQ(-1, cfc2.RunCommand("check-consistency")); - // 3、检查applyIndex + // 3. Check the applyIndex FLAGS_check_hash = false; EXPECT_CALL(*csClient_, Init(_)) .Times(2) @@ -254,12 +254,12 @@ TEST_F(ConsistencyCheckTest, CheckError) { } FLAGS_check_hash = false; curve::tool::ConsistencyCheck cfc(nameSpaceTool_, csClient_); - // 0、Init失败 + // 0. Init failed EXPECT_CALL(*nameSpaceTool_, Init(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 1、获取segment失败 + // 1. Failed to obtain segment EXPECT_CALL(*nameSpaceTool_, Init(_)) .Times(1) .WillOnce(Return(0)); @@ -268,7 +268,7 @@ TEST_F(ConsistencyCheckTest, CheckError) { .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 2、获取chunkserver list失败 + // 2. Failed to obtain chunkserver list EXPECT_CALL(*nameSpaceTool_, GetFileSegments(_, _)) .Times(4) .WillRepeatedly(DoAll(SetArgPointee<1>(segments), @@ -278,7 +278,7 @@ TEST_F(ConsistencyCheckTest, CheckError) { .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 3、init 向chunkserverclient init失败 + // 3. Init of the chunkserver client failed EXPECT_CALL(*nameSpaceTool_, GetChunkServerListInCopySet(_, _, _)) .Times(3) .WillRepeatedly(DoAll(SetArgPointee<2>(csLocs), @@ -288,7 +288,7 @@ TEST_F(ConsistencyCheckTest, CheckError) { .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 4、从chunkserver获取copyset status失败 + // 4. Failed to obtain copyset status from chunkserver EXPECT_CALL(*csClient_, Init(_)) .Times(1) .WillOnce(Return(0)); @@ -297,7 +297,7 @@ TEST_F(ConsistencyCheckTest, CheckError) { .WillOnce(Return(-1)); ASSERT_EQ(-1, cfc.RunCommand("check-consistency")); - // 5、从chunkserver获取chunk hash失败 + // 5. 
Failed to obtain chunk hash from chunkserver FLAGS_check_hash = true; CopysetStatusResponse response1; GetCopysetStatusForTest(&response1); diff --git a/test/tools/etcd_client_test.cpp b/test/tools/etcd_client_test.cpp index b6774425bd..b94533ed42 100644 --- a/test/tools/etcd_client_test.cpp +++ b/test/tools/etcd_client_test.cpp @@ -36,8 +36,8 @@ class EtcdClientTest : public ::testing::Test { ASSERT_TRUE(false); } else if (0 == etcdPid) { /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as it may cause deadlock!!! */ ASSERT_EQ(0, execlp("etcd", "etcd", "--listen-client-urls", @@ -50,7 +50,7 @@ class EtcdClientTest : public ::testing::Test { "--name", "toolEtcdClientTest", nullptr)); exit(0); } - // 一定时间内尝试check直到etcd完全起来 + // Try checking for a certain period of time until the ETCD is completely up curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); bool running; @@ -81,14 +81,14 @@ class EtcdClientTest : public ::testing::Test { TEST_F(EtcdClientTest, GetEtcdClusterStatus) { curve::tool::EtcdClient client; - // Init失败的情况 + // The situation of Init failure ASSERT_EQ(-1, client.Init("")); - // Init成功 + // Init succeeded ASSERT_EQ(0, client.Init(etcdAddr)); std::vector leaderAddrVec; std::map onlineState; - // 正常情况 + // Normal situation ASSERT_EQ(0, client.GetEtcdClusterStatus(&leaderAddrVec, &onlineState)); std::map expected = { { "127.0.0.1:2366", true }, { "127.0.0.1:2368", false } }; @@ -96,7 +96,7 @@ TEST_F(EtcdClientTest, GetEtcdClusterStatus) { ASSERT_EQ(1, leaderAddrVec.size()); ASSERT_EQ("127.0.0.1:2366", leaderAddrVec[0]); - // 空指针错误 + // Null pointer error ASSERT_EQ(-1, client.GetEtcdClusterStatus(nullptr, &onlineState)); ASSERT_EQ(-1, client.GetEtcdClusterStatus(&leaderAddrVec, nullptr)); } @@ -105,13 +105,13 @@ TEST_F(EtcdClientTest, GetAndCheckEtcdVersion) { curve::tool::EtcdClient client; ASSERT_EQ(0, client.Init("127.0.0.1:2366")); - // 正常情况 + // Normal situation std::string version; std::vector failedList; ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 个别etcd获取version失败 + // Individual ETCD failed to obtain version ASSERT_EQ(0, client.Init(etcdAddr)); ASSERT_EQ(0, client.GetAndCheckEtcdVersion(&version, &failedList)); ASSERT_EQ(1, failedList.size()); diff --git a/test/tools/mds_client_test.cpp b/test/tools/mds_client_test.cpp index e261d43895..2d59737cfc 100644 --- a/test/tools/mds_client_test.cpp +++ b/test/tools/mds_client_test.cpp @@ -90,7 +90,7 @@ class ToolMDSClientTest : public ::testing::Test { ASSERT_EQ(0, server->Start("127.0.0.1:9192", nullptr)); brpc::StartDummyServerAt(9193); - // 初始化mds client + // Initialize mds client curve::mds::topology::ListPhysicalPoolResponse response; response.set_statuscode(kTopoErrCodeSuccess); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) @@ -220,9 +220,9 @@ TEST(MDSClientInitTest, Init) { ASSERT_EQ(-1, mdsClient.Init("")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1")); ASSERT_EQ(-1, mdsClient.Init("127.0.0.1:65536")); - // dummy server非法 + // dummy server is illegal ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "")); - // dummy server与mds不匹配 + // dummy server and mds do not match ASSERT_EQ(-1, mdsClient.Init(mdsAddr, "9091,9092,9093")); } @@ -232,7 +232,7 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { std::string filename = "/test"; curve::mds::FileInfo outFileInfo; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) 
.Times(6) .WillRepeatedly(Invoke([](RpcController *controller, @@ -246,7 +246,7 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { })); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 返回码不为OK + // The return code is not O curve::mds::GetFileInfoResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetFileInfo(_, _, _, _)) @@ -258,7 +258,7 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetFileInfo(filename, &outFileInfo)); - // 正常情况 + // Normal situation curve::mds::FileInfo *info = new curve::mds::FileInfo; GetFileInfoForTest(1, info); response.set_allocated_fileinfo(info); @@ -277,7 +277,7 @@ TEST_F(ToolMDSClientTest, GetFileInfo) { TEST_F(ToolMDSClientTest, GetAllocatedSize) { uint64_t allocSize; std::string filename = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( @@ -291,7 +291,7 @@ TEST_F(ToolMDSClientTest, GetAllocatedSize) { })); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 返回码不为OK + // The return code is not OK curve::mds::GetAllocatedSizeResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetAllocatedSize(_, _, _, _)) @@ -303,7 +303,7 @@ TEST_F(ToolMDSClientTest, GetAllocatedSize) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetAllocatedSize(filename, &allocSize)); - // 正常情况 + // Normal situation response.set_allocatedsize(DefaultSegmentSize * 3); for (int i = 1; i <= 3; ++i) { response.mutable_allocsizemap()->insert( @@ -330,7 +330,7 @@ TEST_F(ToolMDSClientTest, ListDir) { std::string fileName = "/test"; std::vector fileInfoVec; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ListDir(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -344,7 +344,7 @@ TEST_F(ToolMDSClientTest, ListDir) { })); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 返回码不为OK + // The return code is not OK curve::mds::ListDirResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListDir(_, _, _, _)) @@ -355,7 +355,7 @@ TEST_F(ToolMDSClientTest, ListDir) { curve::mds::ListDirResponse *response, Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListDir(fileName, &fileInfoVec)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto fileInfo = response.add_fileinfo(); @@ -381,7 +381,7 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { curve::mds::PageFileSegment outSegment; uint64_t offset = 0; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -397,7 +397,7 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // segment不存在 + // segment does not exist curve::mds::GetOrAllocateSegmentResponse response; response.set_statuscode(curve::mds::StatusCode::kSegmentNotAllocated); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) @@ -409,7 +409,7 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(GetSegmentRes::kSegmentNotAllocated, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 文件不存在 + // File does not exist 
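These cases verify that GetSegmentInfo translates the MDS status code into the tool-level GetSegmentRes values asserted here (kSegmentNotAllocated, kFileNotExists, kOtherError). The sketch below only illustrates that mapping; the local enums and the Translate() helper are assumptions made for the example, not the tool's actual definitions.

#include <cstdio>

// Stand-in enums for the sketch; the real definitions live in the curve tool
// and MDS proto code.
enum class SegRes { kOK = 0, kSegmentNotAllocated, kFileNotExists, kOtherError };
enum class MdsStatus { kOK, kSegmentNotAllocated, kFileNotExists, kParaError };

// Hypothetical mapping helper: every unrecognized status degrades to
// kOtherError, which matches the "Other errors" branch exercised nearby.
static SegRes Translate(MdsStatus code) {
    switch (code) {
        case MdsStatus::kOK:                  return SegRes::kOK;
        case MdsStatus::kSegmentNotAllocated: return SegRes::kSegmentNotAllocated;
        case MdsStatus::kFileNotExists:       return SegRes::kFileNotExists;
        default:                              return SegRes::kOtherError;
    }
}

int main() {
    std::printf("%d\n", static_cast<int>(Translate(MdsStatus::kParaError)));      // kOtherError
    std::printf("%d\n", static_cast<int>(Translate(MdsStatus::kFileNotExists)));  // kFileNotExists
    return 0;
}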
response.set_statuscode(curve::mds::StatusCode::kFileNotExists); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( @@ -421,7 +421,7 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { ASSERT_EQ(GetSegmentRes::kFileNotExists, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 其他错误 + // Other errors response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, GetOrAllocateSegment(_, _, _, _)) .WillOnce(DoAll( @@ -433,7 +433,7 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { ASSERT_EQ(GetSegmentRes::kOtherError, mdsClient.GetSegmentInfo(fileName, offset, &outSegment)); - // 正常情况 + // Normal situation PageFileSegment *segment = new PageFileSegment(); GetSegmentForTest(segment); response.set_statuscode(curve::mds::StatusCode::kOK); @@ -453,7 +453,7 @@ TEST_F(ToolMDSClientTest, GetSegmentInfo) { TEST_F(ToolMDSClientTest, DeleteFile) { std::string fileName = "/test"; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -467,7 +467,7 @@ TEST_F(ToolMDSClientTest, DeleteFile) { })); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 返回码不为OK + // The return code is not OK curve::mds::DeleteFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) @@ -479,7 +479,7 @@ TEST_F(ToolMDSClientTest, DeleteFile) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.DeleteFile(fileName)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, DeleteFile(_, _, _, _)) .WillOnce(DoAll( @@ -505,7 +505,7 @@ TEST_F(ToolMDSClientTest, CreateFile) { context.stripeCount = stripeCount; context.poolset = ""; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke([](RpcController *controller, @@ -519,7 +519,7 @@ TEST_F(ToolMDSClientTest, CreateFile) { })); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 返回码不为OK + // The return code is not OK curve::mds::CreateFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) @@ -532,7 +532,7 @@ TEST_F(ToolMDSClientTest, CreateFile) { }))); ASSERT_EQ(-1, mdsClient.CreateFile(context)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); EXPECT_CALL(*nameService, CreateFile(_, _, _, _)) .WillOnce(DoAll(SetArgPointee<2>(response), @@ -564,7 +564,7 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { std::string fileName = "/test"; uint64_t length = 10 * DefaultSegmentSize; - // 发送RPC失败 + // Sending RPC failed EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -580,7 +580,7 @@ TEST_F(ToolMDSClientTest, ExtendVolume_Fail) { return; - // 返回码不为OK + // The return code is not OK curve::mds::ExtendFileResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ExtendFile(_, _, _, _)) @@ -598,7 +598,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { CopySetIdType copysetId = 100; std::vector csLocs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( @@ -613,7 +613,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 返回码不为OK + // The return code is not OK 
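Each "The return code is not OK" case follows the same convention as the rest of this file: the RPC itself completes, but the response carries a non-success statuscode and the client wrapper is expected to report -1. The sketch below only makes that convention explicit; FakeTopologyResponse and CheckResponse are assumptions for illustration, not the actual MDSClient code.

#include <cstdio>

struct FakeTopologyResponse {
    int statuscode = 0;  // 0 stands in for kTopoErrCodeSuccess in this sketch.
};

// Hypothetical wrapper-side check: a transport failure and a logical failure
// are both collapsed into -1, success into 0.
static int CheckResponse(bool rpcFailed, const FakeTopologyResponse& resp) {
    if (rpcFailed) {
        return -1;  // "Failed to send rpc"
    }
    if (resp.statuscode != 0) {
        return -1;  // "The return code is not OK"
    }
    return 0;       // "Normal situation"
}

int main() {
    FakeTopologyResponse bad;
    bad.statuscode = 5;  // any non-success code, e.g. kTopoErrCodeInitFail
    std::printf("%d %d %d\n",
                CheckResponse(true, bad),                       // -1
                CheckResponse(false, bad),                      // -1
                CheckResponse(false, FakeTopologyResponse{}));  // 0
    return 0;
}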
GetChunkServerListInCopySetsResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServerListInCopySets(_, _, _, _)) @@ -626,7 +626,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { ASSERT_EQ(-1, mdsClient.GetChunkServerListInCopySet(logicalPoolId, copysetId, &csLocs)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); CopySetServerInfo csInfo; GetCopysetInfoForTest(&csInfo, 3, copysetId); @@ -646,7 +646,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { ASSERT_EQ(csInfo.cslocs(i).DebugString(), csLocs[i].DebugString()); } - // 测试获取多个copyset + // Test obtaining multiple copysets std::vector expected; response.Clear(); response.set_statuscode(kTopoErrCodeSuccess); @@ -677,7 +677,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerListInCopySets) { TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -691,7 +691,7 @@ TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { })); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 返回码不为OK + // The return code is not OK ListPhysicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPhysicalPool(_, _, _, _)) @@ -704,7 +704,7 @@ TEST_F(ToolMDSClientTest, ListPhysicalPoolsInCluster) { }))); ASSERT_EQ(-1, mdsClient.ListPhysicalPoolsInCluster(&pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_physicalpoolinfos(); @@ -731,7 +731,7 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { PoolIdType poolId = 1; std::vector pools; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( @@ -744,7 +744,7 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { })); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 返回码不为OK + // The return code is not OK ListLogicalPoolResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListLogicalPool(_, _, _, _)) @@ -757,7 +757,7 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { }))); ASSERT_EQ(-1, mdsClient.ListLogicalPoolsInPhysicalPool(poolId, &pools)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto poolInfo = response.add_logicalpoolinfos(); @@ -783,7 +783,7 @@ TEST_F(ToolMDSClientTest, ListLogicalPoolsInPhysicalPool) { TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { PoolIdType poolId = 1; std::vector zones; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -798,7 +798,7 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { })); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListPoolZoneResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListPoolZone(_, _, _, _)) @@ -809,7 +809,7 @@ TEST_F(ToolMDSClientTest, ListZoneInPhysicalPool) { curve::mds::topology::ListPoolZoneResponse *response, Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListZoneInPhysicalPool(poolId, &zones)); - // 正常情况 + // 
Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto zoneInfo = response.add_zones(); @@ -835,7 +835,7 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { ZoneIdType zoneId; std::vector servers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( @@ -850,7 +850,7 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { })); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListZoneServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListZoneServer(_, _, _, _)) @@ -863,7 +863,7 @@ TEST_F(ToolMDSClientTest, ListServersInZone) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListServersInZone(zoneId, &servers)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto serverInfo = response.add_serverinfo(); @@ -890,7 +890,7 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { ServerIdType serverId = 1; std::vector chunkservers; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( @@ -905,7 +905,7 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { })); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::ListChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, ListChunkServer(_, _, _, _)) @@ -918,7 +918,7 @@ TEST_F(ToolMDSClientTest, ListChunkServersOnServer) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListChunkServersOnServer(serverId, &chunkservers)); - // 正常情况,两个chunkserver正常,一个chunkserver retired + // Under normal circumstances, two chunkservers are normal and one chunkserver retired response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 3; i++) { auto csInfo = response.add_chunkserverinfos(); @@ -946,7 +946,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { std::string csAddr = "127.0.0.1:8200"; ChunkServerInfo chunkserver; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) .Times(12) .WillRepeatedly(Invoke( @@ -962,7 +962,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); - // 返回码不为OK + // The return code is not OK curve::mds::topology::GetChunkServerInfoResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetChunkServer(_, _, _, _)) @@ -978,7 +978,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csId, &chunkserver)); ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); ChunkServerInfo *csInfo = new ChunkServerInfo(); GetChunkServerInfoForTest(1, csInfo); @@ -999,7 +999,7 @@ TEST_F(ToolMDSClientTest, GetChunkServerInfo) { GetChunkServerInfoForTest(1, &expected); ASSERT_EQ(expected.DebugString(), chunkserver.DebugString()); - // chunkserver地址不合法的情况 + // Illegal chunkserver address csAddr = ""; ASSERT_EQ(-1, mdsClient.GetChunkServerInfo(csAddr, &chunkserver)); csAddr = "127.0.0.1"; @@ 
-1013,7 +1013,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { std::string csAddr = "127.0.0.1:8200"; std::vector copysets; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) .Times(12) .WillRepeatedly(Invoke( @@ -1028,7 +1028,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, ©sets)); - // 返回码不为OK + // The return code is not OK GetCopySetsInChunkServerResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInChunkServer(_, _, _, _)) @@ -1042,7 +1042,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csId, ©sets)); ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, ©sets)); - // 正常情况 + // Normal situatio response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 5; ++i) { auto copysetInfo = response.add_copysetinfos(); @@ -1066,7 +1066,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { ASSERT_EQ(1, copysets[i].logicalpoolid()); ASSERT_EQ(1000 + i, copysets[i].copysetid()); } - // chunkserver地址不合法的情况 + // Illegal chunkserver address csAddr = ""; ASSERT_EQ(-1, mdsClient.GetCopySetsInChunkServer(csAddr, ©sets)); csAddr = "127.0.0.1"; @@ -1078,7 +1078,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInChunkServer) { TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { std::vector copysets; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -1092,7 +1092,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { })); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(©sets)); - // 返回码不为OK + // The return code is not O GetCopySetsInClusterResponse response; response.set_statuscode(curve::mds::topology::kTopoErrCodeInitFail); EXPECT_CALL(*topoService, GetCopySetsInCluster(_, _, _, _)) @@ -1105,7 +1105,7 @@ TEST_F(ToolMDSClientTest, GetCopySetsInCluster) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.GetCopySetsInCluster(©sets)); - // 正常情况 + // Normal situation response.set_statuscode(kTopoErrCodeSuccess); for (int i = 0; i < 5; ++i) { auto copysetInfo = response.add_copysetinfos(); @@ -1184,7 +1184,7 @@ TEST_F(ToolMDSClientTest, GetCopyset) { } TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -1198,7 +1198,7 @@ TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { })); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 返回码不为OK + // The return code is not OK RapidLeaderScheduleResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrCodeInvalidLogicalPool); @@ -1211,7 +1211,7 @@ TEST_F(ToolMDSClientTest, RapidLeaderSchedule) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.RapidLeaderSchedule(1)); - // 成功 + // Success response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, RapidLeaderSchedule(_, _, _, _)) .WillOnce(DoAll( @@ -1267,7 +1267,7 @@ TEST_F(ToolMDSClientTest, SetLogicalPoolScanState) { TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { std::map statusMap; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .Times(6) .WillRepeatedly(Invoke( @@ -1281,7 +1281,7 @@ 
TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { })); ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); - // 1. QueryChunkServerRecoverStatus失败的情况 + // 1. QueryChunkServerRecoverStatus failed situation QueryChunkServerRecoverStatusResponse response; response.set_statuscode( curve::mds::schedule::kScheduleErrInvalidQueryChunkserverID); @@ -1295,7 +1295,7 @@ TEST_F(ToolMDSClientTest, QueryChunkServerRecoverStatus) { ASSERT_EQ(-1, mdsClient.QueryChunkServerRecoverStatus( std::vector{}, &statusMap)); - // 2. QueryChunkServerRecoverStatus成功的情况 + // 2. Success of QueryChunkServerRecoverStatus response.set_statuscode(curve::mds::schedule::kScheduleErrCodeSuccess); EXPECT_CALL(*scheduleService, QueryChunkServerRecoverStatus(_, _, _, _)) .WillOnce(DoAll( @@ -1329,12 +1329,12 @@ TEST_F(ToolMDSClientTest, GetMetric) { TEST_F(ToolMDSClientTest, GetCurrentMds) { bvar::Status value; value.expose("mds_status"); - // 有leader + // With a leader value.set_value("leader"); std::vector curMds = mdsClient.GetCurrentMds(); ASSERT_EQ(1, curMds.size()); ASSERT_EQ("127.0.0.1:9192", curMds[0]); - // 没有leader + // No leader value.set_value("follower"); ASSERT_TRUE(mdsClient.GetCurrentMds().empty()); } @@ -1343,20 +1343,20 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { bvar::Status value; value.expose("mds_config_mds_listen_addr"); std::map onlineStatus; - // 9180在线,9999不在线 + // 9180 online, 9999 offline value.set_value("{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9192\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); std::map expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", true}}; ASSERT_EQ(expected, onlineStatus); - // 9180的服务端口不一致 + // The service ports of 9180 are inconsistent value.set_value("{\"conf_name\":\"mds.listen.addr\"," "\"conf_value\":\"127.0.0.1:9188\"}"); mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; ASSERT_EQ(expected, onlineStatus); - // 非json格式 + // Non JSON format value.set_value("127.0.0.1::9191"); mdsClient.GetMdsOnlineStatus(&onlineStatus); expected = {{"127.0.0.1:9191", false}, {"127.0.0.1:9192", false}}; @@ -1366,7 +1366,7 @@ TEST_F(ToolMDSClientTest, GetMdsOnlineStatus) { TEST_F(ToolMDSClientTest, ListClient) { std::vector clientAddrs; - // 发送rpc失败 + // Failed to send rpc EXPECT_CALL(*nameService, ListClient(_, _, _, _)) .Times(6) .WillRepeatedly( @@ -1380,7 +1380,7 @@ TEST_F(ToolMDSClientTest, ListClient) { })); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 返回码不为OK + // The return code is not OK curve::mds::ListClientResponse response; response.set_statuscode(curve::mds::StatusCode::kParaError); EXPECT_CALL(*nameService, ListClient(_, _, _, _)) @@ -1392,7 +1392,7 @@ TEST_F(ToolMDSClientTest, ListClient) { Closure *done) { brpc::ClosureGuard doneGuard(done); }))); ASSERT_EQ(-1, mdsClient.ListClient(&clientAddrs)); - // 正常情况 + // Normal situation response.set_statuscode(curve::mds::StatusCode::kOK); for (int i = 0; i < 5; i++) { auto clientInfo = response.add_clientinfos(); diff --git a/test/tools/metric_client_test.cpp b/test/tools/metric_client_test.cpp index 30f6c78802..a7f419b194 100644 --- a/test/tools/metric_client_test.cpp +++ b/test/tools/metric_client_test.cpp @@ -48,7 +48,7 @@ class MetricClientTest : public ::testing::Test { TEST_F(MetricClientTest, GetMetric) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "string_metric"; bvar::Status metric(metricName, "value"); std::string value; @@ -56,11 
+56,11 @@ TEST_F(MetricClientTest, GetMetric) { metricName, &value)); ASSERT_EQ("value", value); - // bvar不存在 + // Bvar does not exist ASSERT_EQ(MetricRet::kNotFound, client.GetMetric(serverAddr, "not-exist-metric", &value)); - // 其他错误 + // Other errors ASSERT_EQ(MetricRet::kOtherErr, client.GetMetric("127.0.0.1:9191", "not-exist-metric", &value)); @@ -68,7 +68,7 @@ TEST_F(MetricClientTest, GetMetric) { TEST_F(MetricClientTest, GetMetricUint) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "uint_metric"; bvar::Status metric(metricName, 10); uint64_t value; @@ -76,15 +76,15 @@ TEST_F(MetricClientTest, GetMetricUint) { metricName, &value)); ASSERT_EQ(10, value); - // bvar不存在 + // Bvar does not exist ASSERT_EQ(MetricRet::kNotFound, client.GetMetricUint(serverAddr, "not-exist-metric", &value)); - // 其他错误 + // Other errors ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint("127.0.0.1:9191", "not-exist-metric", &value)); - // 解析失败 + // Parsing failed bvar::Status metric2("string_metric", "value"); ASSERT_EQ(MetricRet::kOtherErr, client.GetMetricUint(serverAddr, "string_metric", @@ -93,7 +93,7 @@ TEST_F(MetricClientTest, GetMetricUint) { TEST_F(MetricClientTest, GetConfValue) { MetricClient client; - // 正常情况 + // Normal situation std::string metricName = "conf_metric"; bvar::Status conf_metric(metricName, ""); conf_metric.set_value("{\"conf_name\":\"key\"," @@ -103,17 +103,17 @@ TEST_F(MetricClientTest, GetConfValue) { metricName, &value)); ASSERT_EQ("value", value); - // bvar不存在 + // Bvar does not exist ASSERT_EQ(MetricRet::kNotFound, client.GetConfValueFromMetric( serverAddr, "not-exist-metric", &value)); - // 其他错误 + // Other errors ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( "127.0.0.1:9191", "not-exist-metric", &value)); - // 解析失败 + // Parsing failed conf_metric.set_value("string"); ASSERT_EQ(MetricRet::kOtherErr, client.GetConfValueFromMetric( serverAddr, diff --git a/test/tools/namespace_tool_core_test.cpp b/test/tools/namespace_tool_core_test.cpp index e1b365b28f..08b3bbd160 100644 --- a/test/tools/namespace_tool_core_test.cpp +++ b/test/tools/namespace_tool_core_test.cpp @@ -144,7 +144,7 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { uint64_t stripeCount = 32; std::string pstName = ""; - // 1、正常情况 + //1. Normal situation EXPECT_CALL(*client_, CreateFile(_)) .Times(1) .WillOnce(Return(0)); @@ -159,7 +159,7 @@ TEST_F(NameSpaceToolCoreTest, CreateFile) { ASSERT_EQ(0, namespaceTool.CreateFile(context)); - // 2、创建失败 + //2. Creation failed EXPECT_CALL(*client_, CreateFile(_)) .Times(1) .WillOnce(Return(-1)); @@ -170,13 +170,13 @@ TEST_F(NameSpaceToolCoreTest, ExtendVolume) { curve::tool::NameSpaceToolCore namespaceTool(client_); std::string fileName = "/test"; uint64_t length = 10 * segmentSize; - // 1、正常情况 + //1. Normal situation EXPECT_CALL(*client_, ExtendVolume(_, _)) .Times(1) .WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.ExtendVolume(fileName, length)); - // 2、创建失败 + //2. Creation failed EXPECT_CALL(*client_, ExtendVolume(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -188,13 +188,13 @@ TEST_F(NameSpaceToolCoreTest, DeleteFile) { std::string fileName = "/test"; bool forceDelete = false; - // 1、正常情况 + //1. Normal situation EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(1) .WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.DeleteFile(fileName, forceDelete)); - // 2、创建失败 + //2. 
Creation failed EXPECT_CALL(*client_, DeleteFile(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -213,7 +213,7 @@ TEST_F(NameSpaceToolCoreTest, GetChunkServerListInCopySet) { expected.emplace_back(csLoc); } - // 1、正常情况 + //1. Normal situation EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<2>(expected), @@ -224,7 +224,7 @@ TEST_F(NameSpaceToolCoreTest, GetChunkServerListInCopySet) { for (uint64_t i = 0; i < expected.size(); ++i) { ASSERT_EQ(expected[i].DebugString(), csLocs[i].DebugString()); } - // 2、失败 + //2. Failure EXPECT_CALL(*client_, GetChunkServerListInCopySet(_, _, _)) .Times(1) .WillOnce(Return(-1)); @@ -355,7 +355,7 @@ TEST_F(NameSpaceToolCoreTest, CleanRecycleBin) { TEST_F(NameSpaceToolCoreTest, GetAllocatedSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + //1. Normal situation uint64_t allocSize; EXPECT_CALL(*client_, GetAllocatedSize(_, _, _)) .Times(1) @@ -374,7 +374,7 @@ TEST_F(NameSpaceToolCoreTest, QueryChunkCopyset) { uint64_t chunkId; std::pair copyset; - // 正常情况 + //Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), @@ -389,14 +389,14 @@ TEST_F(NameSpaceToolCoreTest, QueryChunkCopyset) { ASSERT_EQ(1, copyset.first); ASSERT_EQ(1001, copyset.second); - // GetFileInfo失败 + //GetFileInfo failed EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.QueryChunkCopyset(fileName, offset, &chunkId, ©set)); - // GetSegmentInfo失败 + //GetSegmentInfo failed EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), @@ -417,7 +417,7 @@ TEST_F(NameSpaceToolCoreTest, GetFileSegments) { PageFileSegment expected; GetSegmentForTest(&expected); - // 1、正常情况 + //1. Normal situation EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), @@ -433,13 +433,13 @@ TEST_F(NameSpaceToolCoreTest, GetFileSegments) { ASSERT_EQ(expected.DebugString(), segments[i].DebugString()); } - // 2、GetFileInfo失败的情况 + //2. The situation of GetFileInfo failure EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.GetFileSegments(fileName, &segments)); - // 3、获取segment失败 + //3. Failed to obtain segment EXPECT_CALL(*client_, GetFileInfo(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), @@ -452,7 +452,7 @@ TEST_F(NameSpaceToolCoreTest, GetFileSegments) { TEST_F(NameSpaceToolCoreTest, GetFileSize) { curve::tool::NameSpaceToolCore namespaceTool(client_); - // 1、正常情况 + //1. Normal situation uint64_t size; EXPECT_CALL(*client_, GetFileSize(_, _)) .Times(1) diff --git a/test/tools/namespace_tool_test.cpp b/test/tools/namespace_tool_test.cpp index a8202bda39..4775ecd822 100644 --- a/test/tools/namespace_tool_test.cpp +++ b/test/tools/namespace_tool_test.cpp @@ -106,7 +106,7 @@ TEST_F(NameSpaceToolTest, GetFile) { PageFileSegment segment; GetSegmentForTest(&segment); FLAGS_fileName = "/test/"; - // 0、Init失败 + // 0. Init failed EXPECT_CALL(*core_, Init(_)) .Times(1) .WillOnce(Return(-1)); @@ -117,7 +117,7 @@ TEST_F(NameSpaceToolTest, GetFile) { .WillOnce(Return(0)); ASSERT_EQ(-1, namespaceTool.RunCommand("abc")); - // 1、正常情况 + // 1. Normal situation FLAGS_showAllocMap = true; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) @@ -131,13 +131,13 @@ TEST_F(NameSpaceToolTest, GetFile) { Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 2、获取fileInfo失败 + // 2. 
Failed to obtain fileInfo EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - // 3、计算大小失败 + // 3. Calculation of size failed EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(fileInfo), @@ -147,7 +147,7 @@ TEST_F(NameSpaceToolTest, GetFile) { .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("get")); - // 4、get的是目录的话还要计算file size + // 4. If the target is a directory, the file size should also be calculated FileInfo fileInfo2; GetFileInfoForTest(&fileInfo2); fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY); @@ -165,7 +165,7 @@ TEST_F(NameSpaceToolTest, GetFile) { Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 5、指定了-showAllocSize=false的话不计算分配大小 + // 5. If - showAllocSize=false is specified, the allocation size will not be calculated FLAGS_showAllocSize = false; EXPECT_CALL(*core_, GetFileInfo(_, _)) .Times(1) @@ -173,7 +173,7 @@ TEST_F(NameSpaceToolTest, GetFile) { Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("get")); - // 6、对目录指定了-showFileSize=false的话不计算文件大小 + //6. If - showFileSize=false is specified for the directory, the file size will not be calculated FLAGS_showFileSize = false; FLAGS_showAllocSize = false; EXPECT_CALL(*core_, GetFileInfo(_, _)) @@ -194,7 +194,7 @@ TEST_F(NameSpaceToolTest, ListDir) { .Times(1) .WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation std::vector files; for (uint64_t i = 0; i < 3; ++i) { files.emplace_back(fileInfo); @@ -212,13 +212,13 @@ TEST_F(NameSpaceToolTest, ListDir) { FLAGS_fileName = "/test/"; ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 2、listDir失败 + // 2. ListDir failed EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 3、计算大小失败,个别的文件计算大小失败会继续计算,但是返回-1 + // 3. Failed to calculate the size. Some files will continue to be calculated if the size calculation fails, but will return -1 EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(files), @@ -230,7 +230,7 @@ TEST_F(NameSpaceToolTest, ListDir) { Return(0))); ASSERT_EQ(-1, namespaceTool.RunCommand("list")); - // 4、指定了-showAllocSize=false的话不计算分配大小 + // 4. If - showAllocSize=false is specified, the allocation size will not be calculated FLAGS_showAllocSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) @@ -238,7 +238,7 @@ TEST_F(NameSpaceToolTest, ListDir) { Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 4、list的时候有目录的话计算fileSize + // 4. If there is a directory in the list, calculate fileSize FileInfo fileInfo2; GetFileInfoForTest(&fileInfo2); fileInfo2.set_filetype(curve::mds::FileType::INODE_DIRECTORY); @@ -253,7 +253,7 @@ TEST_F(NameSpaceToolTest, ListDir) { Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("list")); - // 5、指定了-showFileSize=false的话不计算文件大小 + // 5. If - showFileSize=false is specified, the file size will not be calculated FLAGS_showFileSize = false; EXPECT_CALL(*core_, ListDir(_, _)) .Times(1) @@ -276,14 +276,14 @@ TEST_F(NameSpaceToolTest, SegInfo) { .Times(1) .WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, GetFileSegments(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<1>(segments), Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("seginfo")); - // 2、GetFileSegment失败 + // 2. 
GetFileSegment failed EXPECT_CALL(*core_, GetFileSegments(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -297,13 +297,13 @@ TEST_F(NameSpaceToolTest, CreateFile) { .Times(1) .WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, CreateFile(_)) .Times(1) .WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("create")); - // 2、创建失败 + // 2. Creation failed EXPECT_CALL(*core_, CreateFile(_)) .Times(1) .WillOnce(Return(-1)); @@ -317,13 +317,13 @@ TEST_F(NameSpaceToolTest, DeleteFile) { .Times(1) .WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, DeleteFile(_, _)) .Times(1) .WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("delete")); - // 2、创建失败 + // 2. Creation failed EXPECT_CALL(*core_, DeleteFile(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -337,13 +337,13 @@ TEST_F(NameSpaceToolTest, CleanRecycle) { .Times(1) .WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, CleanRecycleBin(_, _)) .Times(1) .WillOnce(Return(0)); ASSERT_EQ(0, namespaceTool.RunCommand("clean-recycle")); - // 2、失败 + // 2. Failure EXPECT_CALL(*core_, CleanRecycleBin(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -365,7 +365,7 @@ TEST_F(NameSpaceToolTest, PrintChunkLocation) { .Times(1) .WillOnce(Return(0)); - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<2>(chunkId), @@ -377,13 +377,13 @@ TEST_F(NameSpaceToolTest, PrintChunkLocation) { Return(0))); ASSERT_EQ(0, namespaceTool.RunCommand("chunk-location")); - // 2、QueryChunkCopyset失败 + // 2. QueryChunkCopyset failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, namespaceTool.RunCommand("chunk-location")); - // 3、GetChunkServerListInCopySet失败 + // 3. GetChunkServerListInCopySet failed EXPECT_CALL(*core_, QueryChunkCopyset(_, _, _, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<2>(chunkId), diff --git a/test/tools/raft_log_tool_test.cpp b/test/tools/raft_log_tool_test.cpp index ff70a5ef8b..280deb493f 100644 --- a/test/tools/raft_log_tool_test.cpp +++ b/test/tools/raft_log_tool_test.cpp @@ -58,20 +58,20 @@ TEST_F(RaftLogToolTest, PrintHeaders) { raftLogTool.PrintHelp("chunk-meta"); ASSERT_EQ(-1, raftLogTool.RunCommand("chunk-meta")); - // 文件名格式不对 + // The file name format is incorrect FLAGS_fileName = "illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); FLAGS_fileName = "/tmp/illegalfilename"; ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // parser初始化失败 + // parser initialization faile FLAGS_fileName = "/tmp/log_inprogress_002"; EXPECT_CALL(*parser_, Init(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 解析失败 + // Parsing failed EXPECT_CALL(*parser_, Init(_)) .Times(1) .WillOnce(Return(0)); @@ -83,7 +83,7 @@ TEST_F(RaftLogToolTest, PrintHeaders) { .WillOnce(Return(false)); ASSERT_EQ(-1, raftLogTool.RunCommand("raft-log-meta")); - // 正常情况 + // Normal situation EXPECT_CALL(*parser_, Init(_)) .Times(1) .WillOnce(Return(0)); diff --git a/test/tools/segment_parser_test.cpp b/test/tools/segment_parser_test.cpp index 3f9e1f465f..16f4a286e1 100644 --- a/test/tools/segment_parser_test.cpp +++ b/test/tools/segment_parser_test.cpp @@ -71,26 +71,26 @@ class SetmentParserTest : public ::testing::Test { TEST_F(SetmentParserTest, Init) { SegmentParser parser(localFs_); - // 1、打开文件失败 + // 1. 
Failed to open file EXPECT_CALL(*localFs_, Open(_, _)) .Times(3) .WillOnce(Return(-1)) .WillRepeatedly(Return(1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 2、获取文件大小失败 + // 2. Failed to obtain file size EXPECT_CALL(*localFs_, Fstat(_, _)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, parser.Init(fileName)); - // 3、成功 + // 3. Success EXPECT_CALL(*localFs_, Fstat(_, _)) .Times(1) .WillOnce(Return(0)); ASSERT_EQ(0, parser.Init(fileName)); - // 4、反初始化 + // 4. De-initialization EXPECT_CALL(*localFs_, Close(_)) .Times(1) .WillOnce(Return(0)); @@ -120,14 +120,14 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { header.data_checksum = 73235795; char header_buf[ENTRY_HEADER_SIZE] = {0}; - // 读出来的数据大小不对 + // The size of the data read out is incorrect EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) .WillOnce(Return(22)); ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 校验失败 + // Verification failed PackHeader(header, header_buf, true); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(1) @@ -137,7 +137,7 @@ TEST_F(SetmentParserTest, GetNextEntryHeader) { ASSERT_FALSE(parser.GetNextEntryHeader(&header2)); ASSERT_FALSE(parser.SuccessfullyFinished()); - // 正常情况 + // Normal situation PackHeader(header, header_buf); EXPECT_CALL(*localFs_, Read(_, _, _, ENTRY_HEADER_SIZE)) .Times(2) diff --git a/test/tools/snapshot_clone_client_test.cpp b/test/tools/snapshot_clone_client_test.cpp index 024a270a69..70d4cde843 100644 --- a/test/tools/snapshot_clone_client_test.cpp +++ b/test/tools/snapshot_clone_client_test.cpp @@ -50,7 +50,7 @@ TEST_F(SnapshotCloneClientTest, Init) { // no snapshot clone server ASSERT_EQ(1, client.Init("", "")); ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "")); - // dummy server与mds不匹配 + // Dummy server and mds do not match ASSERT_EQ(-1, client.Init("127.0.0.1:5555", "8081,8082,8083")); ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091,9092,9093")); @@ -62,7 +62,7 @@ TEST_F(SnapshotCloneClientTest, Init) { } TEST_F(SnapshotCloneClientTest, GetActiveAddr) { - // 正常情况 + // Normal situation SnapshotCloneClient client(metricClient_); ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); @@ -78,7 +78,7 @@ TEST_F(SnapshotCloneClientTest, GetActiveAddr) { ASSERT_EQ(1, activeAddr.size()); ASSERT_EQ("127.0.0.1:5555", activeAddr[0]); - // 有一个dummyserver显示active,服务端口访问失败 + // There is a dummyserver displaying active, and the service port access failed EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(4) .WillOnce(DoAll(SetArgPointee<2>("active"), @@ -89,7 +89,7 @@ TEST_F(SnapshotCloneClientTest, GetActiveAddr) { activeAddr = client.GetActiveAddrs(); ASSERT_TRUE(activeAddr.empty()); - // 有一个获取metric失败,其他返回standby + // One failed to obtain metric, while the others returned standby EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kNotFound)) @@ -97,7 +97,7 @@ TEST_F(SnapshotCloneClientTest, GetActiveAddr) { Return(MetricRet::kOK))); ASSERT_TRUE(client.GetActiveAddrs().empty()); - // 有两个active状态的 + // Having two active states EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillOnce(DoAll(SetArgPointee<2>("standby"), @@ -114,7 +114,7 @@ TEST_F(SnapshotCloneClientTest, GetOnlineStatus) { SnapshotCloneClient client(metricClient_); ASSERT_EQ(0, client.Init("127.0.0.1:5555,127.0.0.1:5556,127.0.0.1:5557", "9091")); - // 有一个在线,有一个获取metric失败,有一个listen addr不匹配 + // One online, one failed to obtain metric, and one did 
not match the listen addr EXPECT_CALL(*metricClient_, GetConfValueFromMetric(_, _, _)) .Times(3) .WillOnce(DoAll(SetArgPointee<2>("127.0.0.1:5555"), diff --git a/test/tools/status_tool_test.cpp b/test/tools/status_tool_test.cpp index 8b33183220..4fbd3fcd1e 100644 --- a/test/tools/status_tool_test.cpp +++ b/test/tools/status_tool_test.cpp @@ -156,7 +156,7 @@ TEST_F(StatusToolTest, InitFail) { StatusTool statusTool1(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); - // 1、status命令需要所有的init + //1. The status command requires all inits EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(3) .WillOnce(Return(-1)) @@ -177,7 +177,7 @@ TEST_F(StatusToolTest, InitFail) { ASSERT_EQ(-1, statusTool1.RunCommand("status")); ASSERT_EQ(-1, statusTool1.RunCommand("status")); - // 2、etcd-status命令只需要初始化etcdClinet + //2. The etcd-status command only needs to initialize etcdClinet StatusTool statusTool2(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); @@ -186,7 +186,7 @@ TEST_F(StatusToolTest, InitFail) { .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool2.RunCommand("etcd-status")); - // 3、space和其他命令不需要初始化etcdClient + //3. Space and other commands do not require initialization of etcdClient StatusTool statusTool3(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); @@ -200,7 +200,7 @@ TEST_F(StatusToolTest, InitFail) { ASSERT_EQ(-1, statusTool3.RunCommand("space")); ASSERT_EQ(-1, statusTool3.RunCommand("chunkserver-list")); - // 4、snapshot-clone-status只需要snapshot clone + //4. snapshot-clone-status only requires snapshot clone StatusTool statusTool4(mdsClient_, etcdClient_, copysetCheck_, versionTool_, metricClient_, snapshotClient_); @@ -221,7 +221,7 @@ TEST_F(StatusToolTest, SpaceCmd) { std::vector lgPools; lgPools.emplace_back(lgPool); - // 设置Init的期望 + //Set expectations for Init EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(1) .WillOnce(Return(0)); @@ -229,7 +229,7 @@ TEST_F(StatusToolTest, SpaceCmd) { .Times(1) .WillOnce(Return(0)); - // 1、正常情况 + //1. Normal situation EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(lgPools), @@ -255,13 +255,13 @@ TEST_F(StatusToolTest, SpaceCmd) { ASSERT_EQ(0, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("123")); - // 2、ListLogicalPoolsInPhysicalPool失败的情况 + //2. The situation of ListLogicalPoolsInPhysicalPool failure EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 3、获取filesize失败 + //3. Failed to obtain filesize EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(lgPools), @@ -271,7 +271,7 @@ TEST_F(StatusToolTest, SpaceCmd) { .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 4、获取metric失败的情况 + //4. Failure to obtain metric EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), @@ -288,7 +288,7 @@ TEST_F(StatusToolTest, SpaceCmd) { ASSERT_EQ(-1, statusTool.RunCommand("space")); ASSERT_EQ(-1, statusTool.RunCommand("space")); - // 5、获取RecyleBin大小失败的情况 + //5. 
Failure in obtaining the size of RecycleBin EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(DoAll(SetArgPointee<0>(lgPools), @@ -319,13 +319,13 @@ TEST_F(StatusToolTest, ChunkServerCmd) { metricClient_, snapshotClient_); statusTool.PrintHelp("chunkserver-list"); std::vector chunkservers; - // 加入5个chunkserver,2个offline + //Add 5 chunkservers and 2 offline ChunkServerInfo csInfo; for (uint64_t i = 1; i <= 5; ++i) { GetCsInfoForTest(&csInfo, i, i <= 2); chunkservers.emplace_back(csInfo); } - // 设置Init的期望 + //Set expectations for Init EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(1) .WillOnce(Return(0)); @@ -333,7 +333,7 @@ TEST_F(StatusToolTest, ChunkServerCmd) { .Times(1) .WillOnce(Return(0)); - // 正常情况,有一个chunkserver的UnhealthyRatio大于0 + //Under normal circumstances, there is a chunkserver with an UnhealthyRatio greater than 0 EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -349,7 +349,7 @@ TEST_F(StatusToolTest, ChunkServerCmd) { .WillRepeatedly(Return(statistics1)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示offline的 + //Only display offline FLAGS_offline = true; EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) @@ -358,7 +358,7 @@ TEST_F(StatusToolTest, ChunkServerCmd) { Return(0))); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // 只显示unhealthy ratio大于0的 + //Show only those with unhealthy ratio greater than 0 EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -376,14 +376,14 @@ TEST_F(StatusToolTest, ChunkServerCmd) { FLAGS_unhealthy = true; ASSERT_EQ(0, statusTool.RunCommand("chunkserver-list")); - // list chunkserver失败 + //List chunkserver failed EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("chunkserver-list")); - // FLAGS_checkCSAlive为true的时候,会发送rpc检查chunkserver在线状态 + //when FLAGS_checkCSAlive is true, an rpc will be sent to check the online status of the chunkserver FLAGS_checkHealth = false; FLAGS_checkCSAlive = true; EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( @@ -440,7 +440,7 @@ TEST_F(StatusToolTest, StatusCmdCommon) { } chunkservers.emplace(1, chunkserverList); - // 设置Init的期望 + //Set expectations for Init EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(1) .WillOnce(Return(0)); @@ -454,8 +454,8 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .Times(1) .WillOnce(Return(0)); - // 正常情况 - // 1、设置cluster的输出 + //Normal situation + //1. Set the output of the cluster EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); @@ -493,12 +493,12 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .WillOnce(DoAll(SetArgPointee<1>(10 * DefaultSegmentSize), Return(0))); - // 设置client status的输出 + //Set the output of client status EXPECT_CALL(*versionTool_, GetClientVersion(_)) .WillOnce(DoAll(SetArgPointee<0>(clientVersionMap), Return(0))); - // 2、设置MDS status的输出 + //2. Set the output of MDS status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(2) .WillRepeatedly(Return(mdsAddr)); @@ -509,7 +509,7 @@ TEST_F(StatusToolTest, StatusCmdCommon) { .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), Return(0))); - // 3、设置etcd status的输出 + //3. 
Set the output of etcd status EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(2) .WillRepeatedly(DoAll(SetArgPointee<0>("3.4.1"), @@ -520,7 +520,7 @@ SetArgPointee<1>(onlineState), Return(0))); - // 设置snapshot clone的输出 + //Set the output of snapshot clone std::vector activeAddr = {"127.0.0.1:5555"}; EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), @@ -532,7 +532,7 @@ .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineState)); - // 4、设置chunkserver status的输出 + //4. Set the output of chunkserver status EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An>*>())) .Times(1) @@ -550,7 +550,7 @@ .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("status")); - // 5、设置chunkserver status的输出 + //5. Set the output of chunkserver status EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An>*>())) .Times(1) @@ -564,7 +564,7 @@ .WillRepeatedly(Return(true)); ASSERT_EQ(0, statusTool.RunCommand("chunkserver-status")); - // 6、设置mds status的输出 + //6. Set the output of mds status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(mdsAddr)); @@ -576,7 +576,7 @@ Return(0))); ASSERT_EQ(0, statusTool.RunCommand("mds-status")); - // 7、设置etcd status的输出 + //7. Set the output of etcd status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(leaderAddr), @@ -590,7 +590,7 @@ TEST_F(StatusToolTest, StatusCmdError) { copysetCheck_, versionTool_, metricClient_, snapshotClient_); - // 设置Init的期望 + //Set expectations for Init EXPECT_CALL(*mdsClient_, Init(_, _)) .Times(1) .WillOnce(Return(0)); @@ -611,20 +611,20 @@ EXPECT_CALL(*copysetCheck_, GetCopysetStatistics()) .Times(1) .WillOnce(Return(statistics2)); - // 列出物理池失败 + //Failed to list physical pools EXPECT_CALL(*mdsClient_, ListPhysicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 列出逻辑池失败 + //Failed to list logical pools EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillRepeatedly(Return(-1)); - // 获取client version失败 + //Failed to obtain client version EXPECT_CALL(*versionTool_, GetClientVersion(_)) .WillOnce(Return(-1)); - // 2、当前无mds可用 + //2. Currently, no mds are available std::vector failedList = {"127.0.0.1:6666", "127.0.0.1:6667"}; EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) .WillOnce(DoAll(SetArgPointee<1>(failedList), @@ -638,7 +638,7 @@ .Times(2) .WillRepeatedly(SetArgPointee<0>(mdsOnlineStatus)); - // 3、GetEtcdClusterStatus失败 + //3. GetEtcdClusterStatus failed EXPECT_CALL(*etcdClient_, GetAndCheckEtcdVersion(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -646,7 +646,7 @@ .Times(2) .WillRepeatedly(Return(-1)); - // 当前无snapshot clone server可用 + //Currently, no snapshot clone server is available EXPECT_CALL(*versionTool_, GetAndCheckSnapshotCloneVersion(_, _)) .WillOnce(DoAll(SetArgPointee<1>(failedList), Return(0))); @@ -659,7 +659,7 @@ .Times(2) .WillRepeatedly(SetArgPointee<0>(onlineStatus)); - // 4、获取chunkserver version失败并ListChunkServersInCluster失败 + //4.
Failed to obtain chunkserver version and ListChunkServersInCluster EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) .WillOnce(Return(-1)); EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( @@ -668,19 +668,19 @@ TEST_F(StatusToolTest, StatusCmdError) { .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("status")); - // 获取mds在线状态失败 + //Failed to obtain mds online status EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector())); EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(mdsOnlineStatus)); - // 获取mdsversion失败 + //Failed to obtain mdsversion EXPECT_CALL(*versionTool_, GetAndCheckMdsVersion(_, _)) .WillOnce(Return(-1)); ASSERT_EQ(-1, statusTool.RunCommand("mds-status")); - // 个别chunkserver获取version失败 + //Individual chunkservers failed to obtain version EXPECT_CALL(*versionTool_, GetAndCheckChunkServerVersion(_, _)) .WillOnce(DoAll(SetArgPointee<0>("0.0.1"), SetArgPointee<1>(failedList), @@ -702,55 +702,55 @@ TEST_F(StatusToolTest, IsClusterHeatlhy) { std::map onlineStatus2 = {{"127.0.0.1:8001", true}, {"127.0.0.1:8002", false}, {"127.0.0.1:8003", true}}; - // 1、copysets不健康 + //1. Copysets are unhealthy EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(-1)); - // 2、没有mds可用 + //2. No mds available EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector())); - // 3、有mds不在线 + //3. There are MDSs that are not online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); - // 4、获取etcd集群状态失败 + //4. Failed to obtain the ETCD cluster status EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(Return(-1)); - // 5、没有snapshot-clone-server可用 + //5. No snapshot-clone-server available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector())); - // 6、有snapshot-clone-server不在线 + //6. There is snapshot-clone-server that is not online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus2)); ASSERT_FALSE(statusTool.IsClusterHeatlhy()); - // 1、copyset健康 + //1. Copyset Health EXPECT_CALL(*copysetCheck_, CheckCopysetsInCluster()) .Times(1) .WillOnce(Return(0)); - // 2、超过一个mds在服务 + //2. More than one mds is in service EXPECT_CALL(*mdsClient_, GetCurrentMds()) .Times(1) .WillOnce(Return(std::vector(2))); - // 3、mds都在线 + //3. MDS is all online EXPECT_CALL(*mdsClient_, GetMdsOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); - // 4、etcd没有leader且有etcd不在线 + //4. ETCD does not have a leader and there are ETCDs that are not online EXPECT_CALL(*etcdClient_, GetEtcdClusterStatus(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(std::vector()), SetArgPointee<1>(onlineStatus2), Return(0))); - // 5、有多个snapshot-clone-server可用 + //5. Multiple snapshot-clone-server are available EXPECT_CALL(*snapshotClient_, GetActiveAddrs()) .Times(1) .WillOnce(Return(std::vector(2))); - // 9、snapshot-clone-server都在线 + //9. 
snapshot-clone-server is all online EXPECT_CALL(*snapshotClient_, GetOnlineStatus(_)) .Times(1) .WillOnce(SetArgPointee<0>(onlineStatus)); @@ -772,13 +772,13 @@ TEST_F(StatusToolTest, ListClientCmd) { for (int i = 0; i < 10; ++i) { clientAddrs.emplace_back("127.0.0.1:900" + std::to_string(i)); } - // 成功 + //Success EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("client-list")); - // 失败 + //Failed EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -802,13 +802,13 @@ TEST_F(StatusToolTest, ServerList) { GetServerInfoForTest(&server, i); servers.emplace_back(server); } - // 成功 + //Success EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(servers), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("server-list")); - // 失败 + //Failed EXPECT_CALL(*mdsClient_, ListServersInCluster(_)) .Times(1) .WillOnce(Return(-1)); @@ -832,7 +832,7 @@ TEST_F(StatusToolTest, LogicalPoolList) { GetLogicalPoolForTest(i, &lgPool); lgPools.emplace_back(lgPool); } - // 成功 + //Success EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(lgPools), @@ -842,7 +842,7 @@ TEST_F(StatusToolTest, LogicalPoolList) { .WillOnce(DoAll(SetArgPointee<2>(allocMap), Return(0))); ASSERT_EQ(0, statusTool.RunCommand("logical-pool-list")); - // 失败 + //Failed EXPECT_CALL(*mdsClient_, ListLogicalPoolsInCluster(_)) .Times(1) .WillOnce(Return(-1)); diff --git a/test/tools/version_tool_test.cpp b/test/tools/version_tool_test.cpp index 64581f73ac..63674e4fe4 100644 --- a/test/tools/version_tool_test.cpp +++ b/test/tools/version_tool_test.cpp @@ -78,7 +78,7 @@ TEST_F(VersionToolTest, GetAndCheckMdsVersion) { {"127.0.0.1:6668", "127.0.0.1:6669"}, {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -92,7 +92,7 @@ TEST_F(VersionToolTest, GetAndCheckMdsVersion) { ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分mds curve_version失败 + // 2. Obtain partial mds curve_version failed EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -106,7 +106,7 @@ TEST_F(VersionToolTest, GetAndCheckMdsVersion) { std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) @@ -116,7 +116,7 @@ TEST_F(VersionToolTest, GetAndCheckMdsVersion) { ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -130,7 +130,7 @@ TEST_F(VersionToolTest, GetAndCheckMdsVersion) { ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. Old version of mds EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -151,7 +151,7 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { chunkservers.emplace_back(csInfo); } - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -168,7 +168,7 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、ListChunkServersInCluster失败 + // 2. ListChunkServersInCluster failed EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -176,7 +176,7 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); - // 3、获取metric失败 + // 3. Failed to obtain metric EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -192,7 +192,7 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { std::vector expectList = {"127.0.0.1:9191"}; ASSERT_EQ(expectList, failedList); - // 4、chunkserverList为空 + // 4. chunkserverList is empty EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -204,7 +204,7 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、version不一致 + // 5. version inconsistency EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -221,7 +221,7 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { &failedList)); ASSERT_TRUE(failedList.empty()); - // 6、老版本 + //6. Old version EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( An*>())) .Times(1) @@ -242,7 +242,7 @@ TEST_F(VersionToolTest, GetClientVersion) { {"127.0.0.1:8000", "127.0.0.1:8001", "127.0.0.1:8002", "127.0.0.1:8003", "127.0.0.1:8004", "127.0.0.1:8005"}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), @@ -282,7 +282,7 @@ TEST_F(VersionToolTest, GetClientVersion) { ASSERT_EQ(0, versionTool.GetClientVersion(&clientVersionMap)); ASSERT_EQ(expected, clientVersionMap); - // 2、ListClient失败 + // 2. ListClient failed EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) .WillOnce(Return(-1)); @@ -296,7 +296,7 @@ TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { {"127.0.0.1:6668", "127.0.0.1:6669"}, {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -311,7 +311,7 @@ TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分curve_version失败 + // 2. Obtain partial curve_version failed EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -326,7 +326,7 @@ TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) @@ -337,7 +337,7 @@ TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -352,7 +352,7 @@ TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. 
Old version of mds EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); diff --git a/test/util/config_generator.h b/test/util/config_generator.h index f0508e58ca..adf5082291 100644 --- a/test/util/config_generator.h +++ b/test/util/config_generator.h @@ -32,7 +32,7 @@ namespace curve { using curve::common::Configuration; -// 各模块继承该接口,实现自己的初始化配置函数 +// Each module inherits this interface and implements its own initialization configuration function class ConfigGenerator { public: ConfigGenerator() = default; @@ -51,15 +51,15 @@ class ConfigGenerator { configPath_ = configPath; } - // 设置配置项 + // Set a configuration item virtual void SetKV(const std::string& key, const std::string& value) { config_.SetValue(key, value); } /** - * @brief 批量设置配置项 + * @brief Set configuration items in batches * - * @param options 配置项表,形如 "Ip=127.0.0.1" + * @param options configuration item table, in the form of "Ip=127.0.0.1" */ virtual void SetConfigOptions( const std::vector &options) { @@ -71,7 +71,7 @@ } } - // 用于生成配置文件 + // Used to generate configuration files virtual bool Generate() { if (configPath_ != "") { config_.SetConfigPath(configPath_); @@ -85,15 +85,15 @@ return Generate(); } - // 删除配置文件 + // Delete the configuration file virtual int Remove() { return ::remove(configPath_.c_str()); } protected: - // 配置文件路径 + // Configuration file path std::string configPath_; - // 配置器 + // Configurator Configuration config_; }; diff --git a/thirdparties/brpc/brpc.patch b/thirdparties/brpc/brpc.patch index 0799d8b281..d996d3587f 100644 --- a/thirdparties/brpc/brpc.patch +++ b/thirdparties/brpc/brpc.patch @@ -459,7 +459,7 @@ index 4ff38c5..b36de87 100644 +bool PrometheusMetricsDumper::DumpStatusJsonString( + const std::string& name, const std::string& desc) { -+ // 如果不是json格式,返回false, 是json格式则print ++ // If it is not in JSON format, return false; if it is in JSON format, print it + BUTIL_RAPIDJSON_NAMESPACE::Document d; + if (desc.size() == 2) { + return false; @@ -472,7 +472,7 @@ index 4ff38c5..b36de87 100644 + std::string("# TYPE ") + name + std::string(" gauge\n") + + name + std::string("{"); + -+ // 遍历json并打印 ++ // Traverse the JSON and print it + int count = 0; + BUTIL_RAPIDJSON_NAMESPACE::Value::MemberIterator it = d.MemberBegin(); + while (it != d.MemberEnd()) { @@ -2444,7 +2444,7 @@ index fe9055d..38dd7cb 100644 + + ASSERT_EQ("test4{name=\"haorooms\",address=\"word\"} 0\n", strformat); + -+ // 运行结果 ++ // Execution result + // [==========] Running 1 test from 1 test case. + // [----------] Global test environment set-up. + // [----------] 1 test from PrometheusMetricsDumperTest diff --git a/thirdparties/brpc/fix-gcc11.patch b/thirdparties/brpc/fix-gcc11.patch index 34b36d7066..52d4bea55a 100644 --- a/thirdparties/brpc/fix-gcc11.patch +++ b/thirdparties/brpc/fix-gcc11.patch @@ -56,17 +56,17 @@ index f8e1a491..b16d9487 100644 +++ b/docs/cn/thread_local.md @@ -57,9 +57,9 @@ Use *p ... - still the errno of original pthread, undefined b - 严格地说这个问题不是gcc4导致的,而是glibc给__errno_location的签名不够准确,一个返回thread-local指针的函数依赖于段寄存器(TLS的一般实现方式),这怎么能算const呢?由于我们还未找到覆盖__errno_location的方法,所以这个问题目前实际的解决方法是: + Strictly speaking, this issue is not caused by gcc4, but rather by the inaccurate signature of __errno_location provided by glibc. A function that returns a thread-local pointer depends on the segment register (a common implementation of TLS), which can't truly be considered const.
Since we haven't found a method to override __errno_location yet, the current practical solution for this issue is as follows: --**务必在直接或间接使用bthread的项目的gcc编译选项中添加`-D__const__=`,即把`__const__`定义为空,避免gcc4做相关优化。** -+**务必在直接或间接使用bthread的项目的gcc编译选项中添加`-D__const__=__unused__`,即把`__const__`定义为空,避免gcc4做相关优化。** +-**Make sure to add `-D__const__=` to the gcc compilation options of projects that directly or indirectly use bthread; this defines `__const__` as empty, preventing the related gcc4 optimizations.** ++**Make sure to add `-D__const__=__unused__` to the gcc compilation options of projects that directly or indirectly use bthread; this defines `__const__` as empty, preventing the related gcc4 optimizations.** - 把`__const__`定义为空对程序其他部分的影响几乎为0。另外如果你没有**直接**使用errno(即你的项目中没有出现errno),或使用的是gcc --3.4,即使没有定义`-D__const__=`,程序的正确性也不会受影响,但为了防止未来可能的问题,我们强烈建议加上。 +3.4,即使没有定义`-D__const__=__unused__`,程序的正确性也不会受影响,但为了防止未来可能的问题,我们强烈建议加上。 + Defining `__const__` as empty has minimal impact on other parts of the program. Additionally, if you're not **directly** using errno (meaning errno doesn't appear in your project), or if you're using GCC +-3.4, the correctness of the program won't be affected even if you don't define `-D__const__=`, but to safeguard against potential future issues, we strongly recommend adding it. ++3.4, the correctness of the program won't be affected even if you don't define `-D__const__=__unused__`, but to safeguard against potential future issues, we strongly recommend adding it. --需要说明的是,和errno类似,pthread_self也有类似的问题,不过一般pthread_self除了打日志没有其他用途,影响面较小,在`-D__const__=`后pthread_self也会正常。 -+需要说明的是,和errno类似,pthread_self也有类似的问题,不过一般pthread_self除了打日志没有其他用途,影响面较小,在`-D__const__=__unused__`后pthread_self也会正常。 +-It's important to note that, similar to errno, pthread_self also faces similar issues. However, in most cases, pthread_self serves little purpose beyond logging, and its impact is minimal. After defining `-D__const__=`, pthread_self will also function correctly. ++It's important to note that, similar to errno, pthread_self also faces similar issues. However, in most cases, pthread_self serves little purpose beyond logging, and its impact is minimal. After defining `-D__const__=__unused__`, pthread_self will also function correctly.
diff --git a/example/asynchronous_echo_c++/CMakeLists.txt b/example/asynchronous_echo_c++/CMakeLists.txt index 91c3953f..74414e2a 100644 --- a/example/asynchronous_echo_c++/CMakeLists.txt diff --git a/thirdparties/etcdclient/etcdclient.go b/thirdparties/etcdclient/etcdclient.go index dc7df6d691..493d2a807f 100644 --- a/thirdparties/etcdclient/etcdclient.go +++ b/thirdparties/etcdclient/etcdclient.go @@ -21,7 +21,7 @@ package main enum EtcdErrCode { - // grpc errCode, 具体的含义见: + // grpc errCode, for specific meanings, refer to: // https://godoc.org/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes#ErrGRPCNoSpace // https://godoc.org/google.golang.org/grpc/codes#Code EtcdOK = 0, @@ -42,7 +42,7 @@ enum EtcdErrCode EtcdDataLoss = 15, EtcdUnauthenticated = 16, - // 自定义错误码 + // Custom error codes EtcdTxnUnkownOp = 17, EtcdObjectNotExist = 18, EtcdErrObjectType = 19, @@ -198,7 +198,7 @@ func GetErrCode(op string, err error) C.enum_EtcdErrCode { return C.EtcdUnknown } -// TODO(lixiaocui): 日志打印看是否需要glog +// TODO(lixiaocui): Check whether glog is needed for log printing //export NewEtcdClientV3 func NewEtcdClientV3(conf C.struct_EtcdConf) C.enum_EtcdErrCode { var err error @@ -271,7 +271,7 @@ func EtcdClientGet(timeout C.int, key *C.char, resp.Header.Revision } -// TODO(lixiaocui): list可能需要有长度限制 +// TODO(lixiaocui): list may require a length limit //export EtcdClientList func EtcdClientList(timeout C.int, startKey, endKey *C.char, startLen, endLen C.int) (C.enum_EtcdErrCode, uint64, int64) { @@ -425,7 +425,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, goPfx := C.GoStringN(pfx, pfxLen) goLeaderName := C.GoStringN(leaderName, nameLen) - // 创建带ttl的session + // Create a session with TTL var sessionOpts concurrency.SessionOption = concurrency.WithTTL(int(sessionInterSec)) session, err := concurrency.NewSession(globalClient, sessionOpts) if err != nil { @@ -433,7 +433,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, return C.EtcdCampaignInternalErr, 0 } - // 创建election和超时context + // Create an election and a timeout context var election *concurrency.Election = concurrency.NewElection(session, goPfx) var ctx context.Context var cancel context.CancelFunc @@ -448,7 +448,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, wg.Add(2) defer wg.Wait() - // 监测当前的leader + // Monitor the current leader obCtx, obCancel := context.WithCancel(context.Background()) observer := election.Observe(obCtx) defer obCancel() @@ -472,7 +472,7 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, } }() - // 监测自己key的存活状态 + // Monitor the liveness of its own key exitSignal := make(chan struct{}, 1) go func() { defer wg.Done() @@ -490,8 +490,8 @@ func EtcdElectionCampaign(pfx *C.char, pfxLen C.int, } }() - // 1. Campaign返回nil说明当前mds持有的key版本号最小 - // 2. Campaign返回时不检测自己持有key的状态,所以返回nil后需要监测session.Done() + // 1. Campaign returning nil indicates that the current MDS holds the smallest key version number + // 2. Campaign does not check the status of the key it holds when it returns, so after it returns nil, session.Done() needs to be monitored if err := election.Campaign(ctx, goLeaderName); err == nil { log.Printf("[%s/%x] campaign for leader success", goLeaderName, session.Lease()) diff --git a/thirdparties/glog/glog.patch b/thirdparties/glog/glog.patch index caf513b9df..8c005f91a4 100644 --- a/thirdparties/glog/glog.patch +++ b/thirdparties/glog/glog.patch @@ -84,13 +84,13 @@ index 9968b96..3939a81 100644 // Sets whether to avoid logging to the disk if the disk is full.
DECLARE_bool(stop_logging_if_full_disk); -+// 打开/关闭异步日志 ++// Turn on/off asynchronous logging +DECLARE_bool(log_async); + -+// buffer最大的字节数 ++// The maximum number of bytes in the buffer +DECLARE_int32(log_async_buffer_size); + -+// 是否写多个级别的文件 ++// Whether to write log files for multiple levels +DECLARE_bool(multi_write); + #ifdef MUST_UNDEF_GFLAGS_DECLARE_MACROS @@ -576,12 +576,12 @@ index 0c86cf6..7b20b1c 100644 - (this->*(data_->send_method_))(); - ++num_messages_[static_cast(data_->severity_)]; + if (FLAGS_log_async) { -+ // 此处关闭glog的全局锁 -+ // 日志在打入active buffer是加锁的 -+ // 后台线程在将日志写入文件时,也会获取日志文件对应的锁 -+ // 所以关闭全局锁对日志方面不会产生影响 -+ // 但是,如果在运行期间修改日志级别、重设文件保存路径等,会存在延迟的情况 -+ // 只要在初始化之后,不调用除打印日志接口之外的函数,不会出现线程安全问题 ++ // Disabling the global lock for glog here. ++ // Logging is locked when writing to the active buffer. ++ // Background threads also acquire the lock corresponding to the log file when writing logs to the file. ++ // So, disabling the global lock does not affect logging operations. ++ // However, there may be delays if log levels are modified or file save paths are reset during runtime. ++ // As long as functions other than log printing interfaces are not called after initialization, there should be no thread safety issues. + // MutexLock l(log_mutex); + (this->*(data_->send_method_))(); + ++num_messages_[static_cast(data_->severity_)]; @@ -699,33 +699,33 @@ index 0c86cf6..7b20b1c 100644 +static Mutex* g_cleaer_log_mutex = new Mutex(); + +void EnableAsyncLogging() { -+ // 默认开启INFO异步日志 ++ // Enable INFO asynchronous logging by default + std::vector asyncLogLevels{google::INFO}; + -+ // 如果写多个日志文件,开启WARNING ERROR的异步日志 ++ // If writing multiple log files, enable asynchronous logging for WARNING and ERROR + if (FLAGS_multi_write) { + asyncLogLevels.emplace_back(google::WARNING); + asyncLogLevels.emplace_back(google::ERROR); + } + -+ // 开启异步日志 ++ // Enable asynchronous logging + for (auto level : asyncLogLevels) { -+ // 同步日志 ++ // Synchronous logger + SyncLoggerPtr sync_logger(new google::LogFileObject(level, NULL)); + -+ // 异步日志 ++ // Asynchronous logger + AsyncLoggerPtr logger(new google::AsyncLogger( + sync_logger.release(), 1024u * 1024 * FLAGS_log_async_buffer_size_MB)); + + g_async_loggers.push_back(std::move(logger)); + g_async_loggers.back()->Start(); + -+ // 注册异步日志 ++ // Register the asynchronous logger + google::base::SetLogger(level, g_async_loggers.back().get()); + } +} + -+// stop所有的AscynLogger,stop会等待日志写入文件后返回 ++// Stop all AsyncLoggers; Stop waits until the logs have been written to the file before returning +void FlushAllBuffersOnExit() { + MutexLock l(g_cleaer_log_mutex); + for (auto& logger : g_async_loggers) { @@ -733,13 +733,13 @@ index 0c86cf6..7b20b1c 100644 + } +} + -+// 捕捉信号后的回调函数 ++// Callback function invoked after a signal is caught +void FailureWriterWithFlush(const char* data, int size) { + FlushAllBuffersOnExit(); + write(STDERR_FILENO, data, size); +} + -+// LOG(FATAL)回调函数 ++// LOG(FATAL) callback function +void FlushAndAbort() { + FlushAllBuffersOnExit(); + abort(); @@ -753,16 +753,16 @@ index 0c86cf6..7b20b1c 100644 + glog_internal_namespace_::InitGoogleLoggingUtilities(argv0); + -+ // 捕捉信号 ++ // Capture signals + google::InstallFailureSignalHandler(); + -+ // 设置信号回调函数 ++ // Set the signal callback function + google::InstallFailureWriter(FailureWriterWithFlush); + -+ // 设置LOG(FATAL)回调 ++ // Set the LOG(FATAL) callback + google::InstallFailureFunction(FlushAndAbort); + -+ // 初始化日志保存目录 ++ // Initialize the log save directory + if (FLAGS_log_dir.empty()) { + FLAGS_log_dir = "/tmp"; + } @@ -902,12 +902,12
@@ index 0000000..65b1f4c + Switch2Child(false); + if (running_) { + { -+ // 获取锁,等待Write或Flush结束 -+ // 1. 避免写入过程中Stop -+ // 2. 避免flush过程中Stop ++ // Acquire the lock and wait for Write or Flush to finish ++ // 1. Avoid Stop during the write process ++ // 2. Avoid Stop during the flush process + std::lock_guard lk(mutexs_[current_]); + -+ // 设置为false ++ // Set to false + running_ = false; + + wakeFluchCVs_[current_].notify_one(); @@ -928,10 +928,10 @@ index 0000000..65b1f4c + return; + } + -+ // 如果buffer满, 则会阻塞到这里 -+ // 等到后台线程换出buffer ++ // If the buffer is full, it will block here ++ // Wait until the background thread swaps out the buffer + while (IsBufferFull(*activeBuffer_)) { -+ // 记录前台线程被阻塞的次数,测试使用 ++ // Record the number of times the foreground thread has been blocked; used for testing + ++appThreadsBlockedCount_; + freeBufferCVs_[current_].wait(ulk); + } @@ -940,9 +940,9 @@ index 0000000..65b1f4c + wakeFluchCVs_[current_].notify_one(); + } + -+ // glog在打印FATAL日志时, 会依次FATAL->INFO写入多个日志文件,然后abort -+ // 对INFO WARNING ERROR级别开启了异步功能 -+ // 所以要保证这条FATAL日志写入文件后,再abort ++ // When glog prints a FATAL log, it sequentially writes FATAL->INFO to multiple log files and then aborts. ++ // Asynchronous functionality is enabled for INFO, WARNING, and ERROR levels. ++ // Therefore, it is important to ensure that this FATAL log is written to the file before aborting. + if (len > 0 && message[0] == 'F') { + Flush(); + } @@ -958,24 +958,24 @@ index 0000000..65b1f4c + std::unique_lock ulk(mutexs_[current_]); + + // running_ == true -+ // 如果activeBuffer为空, 则会等待一段时间,避免无效的swap ++ // If activeBuffer is empty, it will wait for a period of time to avoid invalid swaps + // running_ == false -+ // AsyncLogger已经停止,但是aciveBuffer中仍然可能有未写入文件的日志 ++ // AsyncLogger has stopped, but there may still be logs in the activeBuffer that have not been written to files + if (!activeBuffer_->NeedsWriteOrFlush() && running_) { + wakeFluchCVs_[current_].wait_for( + ulk, std::chrono::seconds(FLAGS_logbufsecs)); + } + -+ // 交换buffer ++ // Swap the buffers + flushBuffer_.swap(activeBuffer_); + -+ // flushBuffer满, 前台线程在阻塞过程中, 调用notify_all唤醒 ++ // When the flushBuffer is full and the foreground thread is blocked, it is awakened by calling notify_all.
+ if (IsBufferFull(*flushBuffer_)) { + freeBufferCVs_[current_].notify_all(); + } + } + -+ // 逐条写入日志文件 ++ // Write log files one by one + for (const auto& msg : flushBuffer_->messages) { + logger_->Write(false, msg.timestamp, + msg.message.data(), @@ -999,7 +999,7 @@ index 0000000..65b1f4c + std::unique_lock ulk(mutexs_[current_]); + uint64_t expectFlushCount = flushCount_ + 2; + -+ // flush 两次, 确保两个buffer都进行了flush ++ // flush twice to ensure that both buffers have been flushed + while (flushCount_ < expectFlushCount && running_) { + activeBuffer_->flush = true; + wakeFluchCVs_[current_].notify_one(); @@ -1075,7 +1075,7 @@ index 0000000..4d3050d + const char* message, + int message_len) override; + -+ // 日志刷新到文件 ++ // Log refresh to file + void Flush() override; + uint32_t LogSize() override; + @@ -1127,36 +1127,36 @@ index 0000000..4d3050d + + void Switch2Child(bool start); + -+ // 后台线程写日志文件的logger ++ // logger for background thread writing log files + std::unique_ptr logger_; + -+ // 后台写日志线程 ++ // Background log writing thread + std::thread loggerThreads_[2]; + -+ // buffer中日志的最大字节数 ++ // The maximum number of bytes of logs in the buffer + const uint32_t maxBufferSize_; + -+ // buffer满时, 前台线程被阻塞的次数,for test ++ // The number of times a foreground thread is blocked when the buffer is full, for test + std::atomic appThreadsBlockedCount_; + -+ // flush的次数 ++ // Number of flushes + std::atomic flushCount_; + + mutable std::mutex mutexs_[2]; + -+ // 用于唤醒后台写日志线程 ++ // Used to wake up the backend log writing thread + std::condition_variable wakeFluchCVs_[2]; + -+ // 用于唤醒前台线程 ++ // Used to wake up foreground threads + std::condition_variable freeBufferCVs_[2]; + -+ // 用于同步Flush过程 ++ // Used to synchronize the Flush process + std::condition_variable flushCompleteCVs_[2]; + -+ // 前台线程将日志保存到active buffer中 ++ // The foreground thread saves the logs to the active buffer + std::unique_ptr activeBuffer_; + -+ // 后台线程将flush buffer中的日志写入文件 ++ // The backend thread writes logs from the flush buffer to a file + std::unique_ptr flushBuffer_; + + std::atomic running_; @@ -1278,7 +1278,7 @@ index 0000000..7089e40 + + CHECK_EQ(logger->message_count_, kNumMessages * kNumThreads); + CHECK_LT(logger->flush_count_, kNumMessages * kNumThreads); -+ logger.release(); // 内存已由async接管 ++ logger.release(); // Memory taken over by async +} diff --git a/src/logging_unittest.cc b/src/logging_unittest.cc index 762c752..11eccc1 100644 @@ -1322,7 +1322,7 @@ index 762c752..11eccc1 100644 // TODO: The golden test portion of this test is very flakey. - EXPECT_TRUE( - MungeAndDiffTestStderr(FLAGS_test_srcdir + "/src/logging_unittest.err")); -+ // 时间格式转换成iso8601, diff 会失败 ++ // Convert time format to iso8601, diff will fail + // EXPECT_TRUE(MungeAndDiffTestStderr(FLAGS_test_srcdir + "/src/logging_unittest.err")); FLAGS_logtostderr = false; diff --git a/tools/curvefsTool.cpp b/tools/curvefsTool.cpp index 698e54d535..a560ccfa03 100644 --- a/tools/curvefsTool.cpp +++ b/tools/curvefsTool.cpp @@ -83,8 +83,8 @@ namespace topology { const std::string CurvefsTools::clusterMapSeprator = " "; // NOLINT void UpdateFlagsFromConf(curve::common::Configuration* conf) { - // 如果配置文件不存在的话不报错,以命令行为准,这是为了不强依赖配置 - // 如果配置文件存在并且没有指定命令行的话,就以配置文件为准 + // If the configuration file does not exist, no error will be reported, and the command line will prevail. 
This is to avoid strong dependence on the configuration + // If the configuration file exists and no command-line option is specified, the configuration file shall prevail if (conf->LoadConfig()) { google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("mds_addr", &info) && info.is_default) { diff --git a/tools/snaptool/queryclone.py b/tools/snaptool/queryclone.py index a80d746f7a..1ee81e0969 100644 --- a/tools/snaptool/queryclone.py +++ b/tools/snaptool/queryclone.py @@ -29,7 +29,7 @@ def query_clone_recover(args): if totalCount == 0: print "no record found" return - # 提高打印可读性 + # Improve print readability for record in records: code = record['TaskStatus'] record['TaskStatus'] = status[code]