From 676282397105577bee49cb365e4215ec94eae9ef Mon Sep 17 00:00:00 2001 From: koko2pp Date: Mon, 27 Nov 2023 23:24:12 +0800 Subject: [PATCH] style: translate ch-comments to en-comments and apply clang-format part1 Signed-off-by: koko2pp --- conf/chunkserver.conf.example | 145 +-- curvefs/test/volume/bitmap_allocator_test.cpp | 7 +- curvefs_python/cbd_client.h | 12 +- .../local/chunkserver/conf/chunkserver.conf.0 | 4 +- .../local/chunkserver/conf/chunkserver.conf.1 | 4 +- .../local/chunkserver/conf/chunkserver.conf.2 | 4 +- include/chunkserver/chunkserver_common.h | 72 +- nebd/src/part1/async_request_closure.cpp | 22 +- nebd/src/part1/async_request_closure.h | 81 +- proto/cli.proto | 6 +- proto/cli2.proto | 16 +- src/chunkserver/chunk_closure.cpp | 29 +- src/chunkserver/chunk_closure.h | 55 +- src/chunkserver/chunk_service.cpp | 303 +++--- src/chunkserver/chunk_service.h | 107 +-- src/chunkserver/chunk_service_closure.cpp | 104 +-- src/chunkserver/chunk_service_closure.h | 65 +- src/chunkserver/chunkserver.cpp | 601 ++++++------ src/chunkserver/chunkserver.h | 97 +- src/chunkserver/chunkserver_helper.cpp | 20 +- src/chunkserver/chunkserver_main.cpp | 2 +- src/chunkserver/chunkserver_metrics.cpp | 121 +-- src/chunkserver/chunkserver_metrics.h | 355 +++---- src/chunkserver/cli2.h | 83 +- src/client/chunk_closure.cpp | 489 +++++----- src/client/chunk_closure.h | 153 ++- src/common/authenticator.h | 29 +- src/common/bitmap.cpp | 139 ++- src/common/bitmap.h | 171 ++-- src/common/channel_pool.h | 22 +- .../concurrent/bounded_blocking_queue.h | 38 +- src/tools/chunkserver_client.cpp | 30 +- src/tools/chunkserver_client.h | 61 +- src/tools/chunkserver_tool_factory.h | 15 +- test/chunkserver/braft_cli_service2_test.cpp | 195 ++-- test/chunkserver/braft_cli_service_test.cpp | 80 +- test/chunkserver/chunk_service_test.cpp | 78 +- test/chunkserver/chunk_service_test2.cpp | 145 ++- test/chunkserver/chunkserver_helper_test.cpp | 10 +- test/chunkserver/chunkserver_service_test.cpp | 30 +- test/common/bitmap_test.cpp | 30 +- test/common/channel_pool_test.cpp | 12 +- .../chunkserver/chunkserver_basic_test.cpp | 280 +++--- .../chunkserver/chunkserver_clone_recover.cpp | 395 ++++---- .../chunkserver_concurrent_test.cpp | 868 ++++++------------ .../client/chunkserver_exception_test.cpp | 260 +++--- .../chunkserver_healthy_checker_test.cpp | 86 +- .../alloc_statistic_helper_test.cpp | 73 +- .../allocstatistic/alloc_statistic_test.cpp | 140 +-- test/tools/chunkserver_client_test.cpp | 51 +- 50 files changed, 2887 insertions(+), 3308 deletions(-) diff --git a/conf/chunkserver.conf.example b/conf/chunkserver.conf.example index 443412215b..f7ab284dd9 100644 --- a/conf/chunkserver.conf.example +++ b/conf/chunkserver.conf.example @@ -1,18 +1,18 @@ # # Global settings # -# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3 +# Log levels: INFO=0/WARNING=1/ERROR=2/FATAL=3 global.ip=127.0.0.1 global.port=8200 global.subnet=127.0.0.0/24 global.enable_external_server=false global.external_ip=127.0.0.1 global.external_subnet=127.0.0.0/24 -# chunk大小,一般16MB +# Chunk size, typically 16MB global.chunk_size=16777216 -# chunk 元数据页大小,一般4KB +# Chunk metadata page size, typically 4KB global.meta_page_size=4096 -# clone chunk允许的最长location长度 +# Maximum length allowed for the location of a clone chunk # chunk's block size, IO requests must align with it, supported value is |512| and |4096| # it should consist with `block_size` in chunkfilepool.meta_path and `mds.volume.blockSize` in MDS's configurations # for clone chunk and snapshot chunk, it's 
also the minimum granularity that each bit represents @@ -23,34 +23,35 @@ global.location_limit=3000 # # MDS settings # -#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777 +# Support for multiple addresses for MDS, separated by commas: 127.0.0.1:6666,127.0.0.1:7777 mds.listen.addr=127.0.0.1:6666 -# 向mds注册的最大重试次数 +# Maximum retry count for registering with MDS mds.register_retries=100 -# 向mds注册的rpc超时时间,一般1000ms +# RPC timeout for MDS registration, typically 1000ms mds.register_timeout=1000 -# 向mds发送心跳的间隔,一般10s +# Interval for sending heartbeats to MDS, usually 10s mds.heartbeat_interval=10 -# 向mds发送心跳的rpc超时间,一般1000ms +# RPC timeout for sending heartbeats to MDS, typically 1000ms mds.heartbeat_timeout=5000 # # Chunkserver settings # -# chunkserver主目录 +# Main directory for chunkserver chunkserver.stor_uri=local://./0/ -# chunkserver元数据文件 +# Metadata file for chunkserver chunkserver.meta_uri=local://./0/chunkserver.dat -# disk类型 +# Disk type chunkserver.disk_type=nvme -# raft内部install snapshot带宽上限,一般20MB +# Raft internal install snapshot bandwidth limit, usually 20MB chunkserver.snapshot_throttle_throughput_bytes=20971520 -# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB, -# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个 -# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而 -# 不是20MB的带宽 +# Throttle check cycles are for finer-grained bandwidth control. For example, +# with snapshotThroughputBytes=100MB and check cycles=10, it ensures that +# the bandwidth is 10MB every 1/10 second, without accumulation. For instance, +# the bandwidth is 10MB for the first 1/10 second, but it expires after that. +# In the second 1/10 second, the bandwidth remains 10MB, not 20MB. chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit for the number of inflight IO requests, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -64,41 +65,41 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0 # # Copyset settings # -# 是否检查任期,一般检查 +# Whether to check the term, usually checked copyset.check_term=true -# 是否关闭raft配置变更的服务,一般不关闭 +# Whether to disable the service for raft configuration changes, generally not disabled copyset.disable_cli=false copyset.log_applied_task=false -# raft选举超时时间,一般是5000ms +# Raft election timeout, usually 5000ms copyset.election_timeout_ms=1000 -# raft打快照间隔,一般是1800s,也就是30分钟 +# Raft snapshot interval, usually 1800s, i.e., 30 minutes copyset.snapshot_interval_s=1800 -# add一个节点,add的节点首先以类似learner的角色拷贝数据 -# 在跟leader差距catchup_margin个entry的时候,leader -# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定 -# 会commit&apply,catchup_margin较小可以大概率确保learner -# 后续很快可以加入复制组 +# When adding a node, the added node first copies data in a role similar to a learner. +# When there is a difference of catchup_margin entries from the leader, the leader +# will attempt to commit and apply the configuration change entry (usually the committed +# entry will certainly be committed and applied). A smaller catchup_margin can ensure +# that the learner can quickly join the replication group. 
copyset.catchup_margin=1000
-# copyset chunk数据目录
+# Copyset chunk data directory
copyset.chunk_data_uri=local://./0/copysets
-# raft wal log目录
+# Raft WAL log directory
copyset.raft_log_uri=curve://./0/copysets
-# raft元数据目录
+# Raft metadata directory
copyset.raft_meta_uri=local://./0/copysets
-# raft snapshot目录
+# Raft snapshot directory
copyset.raft_snapshot_uri=curve://./0/copysets
-# copyset回收目录
+# Copyset recycling directory
copyset.recycler_uri=local://./0/recycler
-# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
+# Threshold for concurrent copyset loading when the chunkserver starts; 0 means no limit
copyset.load_concurrency=10
-# chunkserver use how many threads to use copyset complete sync.
+# Number of threads used by the chunkserver for copyset complete synchronization.
copyset.sync_concurrency=20
-# 检查copyset是否加载完成出现异常时的最大重试次数
+# Maximum number of retries when an exception occurs while checking whether a copyset has finished loading.
copyset.check_retrytimes=3
-# 当前peer的applied_index与leader上的committed_index差距小于该值
-# 则判定copyset已经加载完成
+# If the difference between the applied_index of the current peer and the committed_index
+# on the leader is less than this value, the copyset is considered loaded.
copyset.finishload_margin=2000
-# 循环判定copyset是否加载完成的内部睡眠时间
+# Sleep interval inside the loop that checks whether a copyset has finished loading.
copyset.check_loadmargin_interval_ms=1000
# scan copyset interval
copyset.scan_interval_sec=5
@@ -124,26 +125,26 @@ copyset.check_syncing_interval_ms=500
#
# Clone settings
#
-# 禁止使用curveclient
+# Whether to disable curveclient
clone.disable_curve_client=false
-# 禁止使用s3adapter
+# Whether to disable s3adapter
clone.disable_s3_adapter=false
-# 克隆的分片大小,一般1MB
+# Slice size for cloning, usually 1MB
clone.slice_size=1048576
-# 读clone chunk时是否需要paste到本地
-# 该配置对recover chunk请求类型无效
+# Whether data read from a clone chunk needs to be pasted to the local chunk
+# This option has no effect on recover chunk requests
clone.enable_paste=false
-# 克隆的线程数量
+# Number of clone threads
clone.thread_num=10
-# 克隆的队列深度
+# Queue depth for cloning
clone.queue_depth=6000
-# curve用户名
+# Curve username
curve.root_username=root
-# curve密码
+# Curve password
curve.root_password=root_password
-# client配置文件
+# Client configuration file
curve.config_path=conf/cs_client.conf
-# s3配置文件
+# S3 configuration file
s3.config_path=conf/s3.conf
# Curve File time to live
curve.curve_file_timeout_s=30
@@ -151,7 +152,7 @@ curve.curve_file_timeout_s=30
#
# Local FileSystem settings
#
-# 是否开启使用renameat2,ext4内核3.15以后开始支持
+# Whether to use renameat2; supported on ext4 since kernel 3.15
fs.enable_renameat2=true
#
@@ -171,27 +172,27 @@ storeng.sync_write=false
#
# Concurrent apply module
-# 并发模块写线程的并发度,一般是10
+# Concurrency of the write threads in the concurrent apply module, usually 10
wconcurrentapply.size=10
-# 并发模块写线程的队列深度
+# Queue depth of the write threads in the concurrent apply module
wconcurrentapply.queuedepth=1
-# 并发模块读线程的并发度,一般是5
+# Concurrency of the read threads in the concurrent apply module, usually 5
rconcurrentapply.size=5
-# 并发模块读线程的队列深度
+# Queue depth of the read threads in the concurrent apply module
rconcurrentapply.queuedepth=1
#
# Chunkfile pool
#
-# 是否开启从chunkfilepool获取chunk,一般是true
+# Whether to enable obtaining chunks from chunkfilepool, usually true
chunkfilepool.enable_get_chunk_from_pool=true
-# chunkfilepool目录
+# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/chunks
-# chunkfilepool meta文件路径
+# chunkfilepool meta file path
#chunkfilepool.meta_path=./chunkfilepool.meta
-# chunkfilepool meta文件大小
+# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size=4096
-# chunkfilepool get chunk最大重试次数
+# chunkfilepool get chunk maximum retry count
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
@@ -211,23 +212,23 @@ chunkfilepool.thread_num=1
#
# WAL file pool
#
-# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
+# Whether walpool shares the chunkfilepool; if true, the configuration items from the third one onwards are ignored
walfilepool.use_chunk_file_pool=true
-# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
+# Takes effect when WALpool and ChunkFilePool are shared; space for the walpool is reserved during capacity allocation
walfilepool.use_chunk_file_pool_reserve=15
-# 是否开启从walfilepool获取chunk,一般是true
+# Whether to enable obtaining chunks from walfilepool, usually true
walfilepool.enable_get_segment_from_pool=true
-# walpool目录
+# Walpool directory
walfilepool.file_pool_dir=./0/
-# walpool meta文件路径
+# Walpool meta file path
walfilepool.meta_path=./walfilepool.meta
-# walpool meta文件大小
+# Walpool meta file size
walfilepool.segment_size=8388608
-# WAL metapage大小
+# WAL metapage size
walfilepool.metapage_size=4096
-# WAL filepool 元数据文件大小
+# WAL filepool metadata file size
walfilepool.meta_file_size=4096
-# WAL filepool get chunk最大重试次数
+# WAL filepool get chunk maximum retry count
walfilepool.retry_times=5
# Whether allocate filePool by percent of disk size.
walfilepool.allocated_by_percent=true
@@ -241,14 +242,14 @@ walfilepool.thread_num=1
#
# trash settings
#
-# chunkserver回收数据彻底删除的过期时间
+# Expiration time after which the chunkserver permanently deletes recycled data
trash.expire_afterSec=300
-# chunkserver检查回收数据过期时间的周期
+# Interval at which the chunkserver checks recycled data for expiration
trash.scan_periodSec=120
# common option
#
-# chunkserver 日志存放文件夹
+# Chunkserver log storage folder
chunkserver.common.logDir=./
-# 单元测试情况下
+# For unit tests
# chunkserver.common.logDir=./runlog/
diff --git a/curvefs/test/volume/bitmap_allocator_test.cpp b/curvefs/test/volume/bitmap_allocator_test.cpp
index 3eca470fec..88c324e9e4 100644
--- a/curvefs/test/volume/bitmap_allocator_test.cpp
+++ b/curvefs/test/volume/bitmap_allocator_test.cpp
@@ -18,9 +18,8 @@

 #include

-#include "curvefs/test/volume/common.h"
-
 #include "absl/memory/memory.h"
+#include "curvefs/test/volume/common.h"

 namespace curvefs {
 namespace volume {
@@ -100,7 +99,7 @@ TEST_F(BitmapAllocatorTest, AllocFromBitmap) {

     Extents expected = {
         Extent(opt_.startOffset + opt_.length * opt_.smallAllocProportion,
-                   allocSize)};
+               allocSize)};

     ASSERT_EQ(expected, exts);

@@ -225,7 +224,7 @@ TEST_F(BitmapAllocatorTest, TestMarkUsedRandom) {
     uint64_t off = opt_.startOffset;
     uint64_t usedSize = 0;

-    // 对于每一个 size per bit,随机其中一部分设置
+    // For each size per bit, randomly set a portion of it
     auto select = [this, &usedSize](uint64_t startOffset) {
         auto off = rand_r(&seed) * 4096 % opt_.sizePerBit;
         auto len = rand_r(&seed) * 4096 % opt_.sizePerBit;
diff --git a/curvefs_python/cbd_client.h b/curvefs_python/cbd_client.h
index 64109ef8e5..a5415b26e3 100644
--- a/curvefs_python/cbd_client.h
+++ b/curvefs_python/cbd_client.h
@@ -56,15 +56,17 @@ class CBDClient {
     int Rename(UserInfo_t* info, const char* oldpath, const char* newpath);
     int Extend(const char* filename, UserInfo_t* info, uint64_t size);

-    // 同步读写
-    int Read(int fd, char* buf, unsigned long offset, unsigned long length);  // NOLINT
-    int Write(int fd, const char* buf, unsigned long offset, unsigned long length);  // NOLINT
+    // Synchronous read and write
+    int Read(int
fd, char* buf, unsigned long offset, + unsigned long length); // NOLINT + int Write(int fd, const char* buf, unsigned long offset, + unsigned long length); // NOLINT - // 异步读写 + // Asynchronous read and write int AioRead(int fd, AioContext* aioctx); int AioWrite(int fd, AioContext* aioctx); - // 获取文件的基本信息 + // Obtain basic information about the file int StatFile(const char* filename, UserInfo_t* info, FileInfo_t* finfo); int ChangeOwner(const char* filename, const char* owner, UserInfo_t* info); diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.0 b/deploy/local/chunkserver/conf/chunkserver.conf.0 index 1525855ebe..f7ac0f1f19 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.0 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.0 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./0/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -145,7 +145,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# If walpool is set to true, the following configuration is invalid as walpool shares chunkfilepool. walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./0/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.1 b/deploy/local/chunkserver/conf/chunkserver.conf.1 index d14fa15bb6..62719e0c30 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.1 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.1 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./1/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -143,7 +143,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? If true, the following configuration is invalid walfilepool.use_chunk_file_pool=true walfilepool.enable_get_segment_from_pool=false walfilepool.file_pool_dir=./1/walfilepool/ diff --git a/deploy/local/chunkserver/conf/chunkserver.conf.2 b/deploy/local/chunkserver/conf/chunkserver.conf.2 index 2604423d6f..edc5750db7 100644 --- a/deploy/local/chunkserver/conf/chunkserver.conf.2 +++ b/deploy/local/chunkserver/conf/chunkserver.conf.2 @@ -46,7 +46,7 @@ chunkserver.meta_uri=local://./2/chunkserver.dat chunkserver.disk_type=nvme chunkserver.snapshot_throttle_throughput_bytes=41943040 chunkserver.snapshot_throttle_check_cycles=4 -# 限制inflight io数量,一般是5000 +# Limit the number of inflight io, usually 5000 chunkserver.max_inflight_requests=5000 # @@ -142,7 +142,7 @@ chunkfilepool.retry_times=5 # # WAL file pool # -# walpool是否共用chunkfilepool,如果为true,则以下配置无效 +# Does walpool share chunkfilepool? 
If true, the following configuration is invalid
walfilepool.use_chunk_file_pool=true
walfilepool.enable_get_segment_from_pool=false
walfilepool.file_pool_dir=./2/walfilepool/
diff --git a/include/chunkserver/chunkserver_common.h b/include/chunkserver/chunkserver_common.h
index c483dbea82..bb5ab9b87f 100644
--- a/include/chunkserver/chunkserver_common.h
+++ b/include/chunkserver/chunkserver_common.h
@@ -24,9 +24,9 @@
 #define INCLUDE_CHUNKSERVER_CHUNKSERVER_COMMON_H_

 #include
+#include
 #include
 #include
-#include

 #include
 #include
@@ -35,16 +35,16 @@ namespace curve {
 namespace chunkserver {

 /* for IDs */
-using LogicPoolID   = uint32_t;
-using CopysetID     = uint32_t;
-using ChunkID       = uint64_t;
-using SnapshotID    = uint64_t;
-using SequenceNum   = uint64_t;
+using LogicPoolID = uint32_t;
+using CopysetID = uint32_t;
+using ChunkID = uint64_t;
+using SnapshotID = uint64_t;
+using SequenceNum = uint64_t;

 using ChunkSizeType = uint32_t;
-using PageSizeType  = uint32_t;
+using PageSizeType = uint32_t;

-using GroupNid      = uint64_t;
+using GroupNid = uint64_t;
 using ChunkServerID = uint32_t;

 // braft
@@ -60,57 +60,57 @@ using PosixFileSystemAdaptor = braft::PosixFileSystemAdaptor;
 using SnapshotThrottle = braft::SnapshotThrottle;
 using ThroughputSnapshotThrottle = braft::ThroughputSnapshotThrottle;

-
-// TODO(lixiaocui): 考虑一下后续的单元测试或者校验要怎么做
+// TODO(lixiaocui): Consider how to proceed with subsequent unit testing or
+// validation
 /*
- * IO性能统计复合metric类型
+ * IO performance statistics composite metric type
 */
 struct IoPerfMetric {
-    uint64_t  readCount;
-    uint64_t  writeCount;
-    uint64_t  readBytes;
-    uint64_t  writeBytes;
-    uint64_t  readIops;
-    uint64_t  writeIops;
-    uint64_t  readBps;
-    uint64_t  writeBps;
+    uint64_t readCount;
+    uint64_t writeCount;
+    uint64_t readBytes;
+    uint64_t writeBytes;
+    uint64_t readIops;
+    uint64_t writeIops;
+    uint64_t readBps;
+    uint64_t writeBps;
 };

 /**
- * 将(LogicPoolID, CopysetID)二元组转换成数字格式的复制组ID,格式如下:
+ * Convert the (LogicPoolID, CopysetID) pair into a replication group ID in
+ * numerical format, as follows:
 * | group id |
 * |     32    |     32    |
 * | logic pool id | copyset id |
 */
-inline GroupNid ToGroupNid(const LogicPoolID &logicPoolId,
-                           const CopysetID &copysetId) {
+inline GroupNid ToGroupNid(const LogicPoolID& logicPoolId,
+                           const CopysetID& copysetId) {
     return (static_cast(logicPoolId) << 32) | copysetId;
 }
 /**
- * 将(LogicPoolID, CopysetID)二元组转换成字符串格式的复制组ID
+ * Convert the (LogicPoolID, CopysetID) pair into a replication group ID in
+ * string format
 */
-inline GroupId ToGroupId(const LogicPoolID &logicPoolId,
-                         const CopysetID &copysetId) {
+inline GroupId ToGroupId(const LogicPoolID& logicPoolId,
+                         const CopysetID& copysetId) {
     return std::to_string(ToGroupNid(logicPoolId, copysetId));
 }
-#define ToBraftGroupId ToGroupId
+#define ToBraftGroupId ToGroupId

 /**
- * 从数字格式的复制组ID中解析LogicPoolID
+ * Parse the LogicPoolID from a replication group ID in numeric format
 */
-inline LogicPoolID GetPoolID(const GroupNid &groupId) {
-    return groupId >> 32;
-}
+inline LogicPoolID GetPoolID(const GroupNid& groupId) { return groupId >> 32; }
 /**
- * 从数字格式的复制组ID中解析CopysetID
+ * Parse the CopysetID from a replication group ID in numeric format
 */
-inline CopysetID GetCopysetID(const GroupNid &groupId) {
+inline CopysetID GetCopysetID(const GroupNid& groupId) {
     return groupId & (((uint64_t)1 << 32) - 1);
 }

-/* 格式输出 group id 的 字符串 (logicPoolId, copysetId) */
-inline std::string ToGroupIdString(const LogicPoolID &logicPoolId,
-                                   const CopysetID &copysetId) {
+/* Format a group ID as the string (logicPoolId, copysetId) */
+inline std::string
ToGroupIdString(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { std::string groupIdString; groupIdString.append("("); groupIdString.append(std::to_string(logicPoolId)); @@ -121,7 +121,7 @@ inline std::string ToGroupIdString(const LogicPoolID &logicPoolId, groupIdString.append(")"); return groupIdString; } -#define ToGroupIdStr ToGroupIdString +#define ToGroupIdStr ToGroupIdString // Meta page is header of chunkfile, and is used to store meta data of // chunkfile. diff --git a/nebd/src/part1/async_request_closure.cpp b/nebd/src/part1/async_request_closure.cpp index 94d1a9f50f..c9ab8e873e 100644 --- a/nebd/src/part1/async_request_closure.cpp +++ b/nebd/src/part1/async_request_closure.cpp @@ -22,8 +22,8 @@ #include "nebd/src/part1/async_request_closure.h" -#include #include +#include #include #include @@ -40,11 +40,10 @@ void AsyncRequestClosure::Run() { int64_t sleepUs = GetRpcRetryIntervalUs(aioCtx->retryCount); LOG_EVERY_SECOND(WARNING) << OpTypeToString(aioCtx->op) << " rpc failed" - << ", error = " << cntl.ErrorText() - << ", fd = " << fd + << ", error = " << cntl.ErrorText() << ", fd = " << fd << ", log id = " << cntl.log_id() - << ", retryCount = " << aioCtx->retryCount - << ", sleep " << (sleepUs / 1000) << " ms"; + << ", retryCount = " << aioCtx->retryCount << ", sleep " + << (sleepUs / 1000) << " ms"; bthread_usleep(sleepUs); Retry(); } else { @@ -52,7 +51,7 @@ void AsyncRequestClosure::Run() { if (nebd::client::RetCode::kOK == retCode) { DVLOG(6) << OpTypeToString(aioCtx->op) << " success, fd = " << fd; - // 读请求复制数据 + // Read Request Copy Data if (aioCtx->op == LIBAIO_OP::LIBAIO_OP_READ) { cntl.response_attachment().copy_to( aioCtx->buf, cntl.response_attachment().size()); @@ -73,8 +72,8 @@ void AsyncRequestClosure::Run() { } int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { - // EHOSTDOWN: 找不到可用的server。 - // server可能停止服务了,也可能正在退出中(返回了ELOGOFF) + // EHOSTDOWN: Unable to find an available server. 
+ // The server may have stopped serving or may be exiting (returning ELOGOFF) if (cntl.ErrorCode() == EHOSTDOWN) { return requestOption_.rpcHostDownRetryIntervalUs; } @@ -83,10 +82,9 @@ int64_t AsyncRequestClosure::GetRpcRetryIntervalUs(int64_t retryCount) const { return requestOption_.rpcRetryIntervalUs; } - return std::max( - requestOption_.rpcRetryIntervalUs, - std::min(requestOption_.rpcRetryIntervalUs * retryCount, - requestOption_.rpcRetryMaxIntervalUs)); + return std::max(requestOption_.rpcRetryIntervalUs, + std::min(requestOption_.rpcRetryIntervalUs * retryCount, + requestOption_.rpcRetryMaxIntervalUs)); } void AsyncRequestClosure::Retry() const { diff --git a/nebd/src/part1/async_request_closure.h b/nebd/src/part1/async_request_closure.h index 27ab7f613d..0df2f03172 100644 --- a/nebd/src/part1/async_request_closure.h +++ b/nebd/src/part1/async_request_closure.h @@ -32,12 +32,9 @@ namespace nebd { namespace client { struct AsyncRequestClosure : public google::protobuf::Closure { - AsyncRequestClosure(int fd, - NebdClientAioContext* ctx, + AsyncRequestClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : fd(fd), - aioCtx(ctx), - requestOption_(option) {} + : fd(fd), aioCtx(ctx), requestOption_(option) {} void Run() override; @@ -47,94 +44,70 @@ struct AsyncRequestClosure : public google::protobuf::Closure { void Retry() const; - // 请求fd + // Request fd int fd; - // 请求上下文信息 + // Request Context Information NebdClientAioContext* aioCtx; - // brpc请求的controller + // Controller requested by brpc brpc::Controller cntl; RequestOption requestOption_; }; struct AioWriteClosure : public AsyncRequestClosure { - AioWriteClosure(int fd, - NebdClientAioContext* ctx, + AioWriteClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} WriteResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioReadClosure : public AsyncRequestClosure { - AioReadClosure(int fd, - NebdClientAioContext* ctx, + AioReadClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} ReadResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioDiscardClosure : public AsyncRequestClosure { - AioDiscardClosure(int fd, - NebdClientAioContext* ctx, + AioDiscardClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} DiscardResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; struct AioFlushClosure : public AsyncRequestClosure { - AioFlushClosure(int fd, - NebdClientAioContext* ctx, + AioFlushClosure(int fd, NebdClientAioContext* ctx, const RequestOption& option) - : AsyncRequestClosure( - fd, - ctx, - option) {} + : AsyncRequestClosure(fd, ctx, option) {} FlushResponse response; - RetCode GetResponseRetCode() const override { - return response.retcode(); - } + RetCode GetResponseRetCode() const override { return response.retcode(); } }; inline const char* OpTypeToString(LIBAIO_OP opType) { switch 
(opType) {
-    case LIBAIO_OP::LIBAIO_OP_READ:
-        return "Read";
-    case LIBAIO_OP::LIBAIO_OP_WRITE:
-        return "Write";
-    case LIBAIO_OP::LIBAIO_OP_DISCARD:
-        return "Discard";
-    case LIBAIO_OP::LIBAIO_OP_FLUSH:
-        return "Flush";
-    default:
-        return "Unknown";
+        case LIBAIO_OP::LIBAIO_OP_READ:
+            return "Read";
+        case LIBAIO_OP::LIBAIO_OP_WRITE:
+            return "Write";
+        case LIBAIO_OP::LIBAIO_OP_DISCARD:
+            return "Discard";
+        case LIBAIO_OP::LIBAIO_OP_FLUSH:
+            return "Flush";
+        default:
+            return "Unknown";
     }
 }
diff --git a/proto/cli.proto b/proto/cli.proto
index 46981c967d..2d0f84696d 100755
--- a/proto/cli.proto
+++ b/proto/cli.proto
@@ -20,12 +20,12 @@ package curve.chunkserver;
 option cc_generic_services = true;
 option go_package = "proto/cli";

-// 这里都用 logicPoolId, copysetId,进入 rpc service 之后,会转换成 string
-// 类型的 groupId,在传给 raft
+// logicPoolId and copysetId are used here. After entering the RPC service,
+// they are converted into a string-typed groupId and then passed to raft.
 // |          groupId           |
 // | logicPoolId |  copysetId   |
 message AddPeerRequest {
-    required uint32 logicPoolId = 1;  // logicPoolId 实际上 uint16,但是 proto 没有 uint16
+    required uint32 logicPoolId = 1;  // logicPoolId is actually uint16, but proto does not have uint16
     required uint32 copysetId = 2;
     required string leader_id = 3;
     required string peer_id = 4;
diff --git a/proto/cli2.proto b/proto/cli2.proto
index 76416f7a9f..b41d00c322 100755
--- a/proto/cli2.proto
+++ b/proto/cli2.proto
@@ -23,17 +23,17 @@ package curve.chunkserver;
 option cc_generic_services = true;
 option go_package = "proto/cli2";

-// cli.proto 供老的使用保证
+// cli.proto is kept to guarantee compatibility for legacy users
 message AddPeerRequest2 {
-    required uint32 logicPoolId = 1;  // 逻辑池id
-    required uint32 copysetId = 2;    // 复制组id
+    required uint32 logicPoolId = 1;  // Logical pool ID
+    required uint32 copysetId = 2;    // Copyset (replication group) ID
     required common.Peer leader = 3;  // leader
-    required common.Peer addPeer = 4; // 新增peer
+    required common.Peer addPeer = 4; // The peer to add
 }

 message AddPeerResponse2 {
-    repeated common.Peer oldPeers = 1; // 老配置
-    repeated common.Peer newPeers = 2; // 新配置
+    repeated common.Peer oldPeers = 1; // Old configuration
+    repeated common.Peer newPeers = 2; // New configuration
 }

 message RemovePeerRequest2 {
@@ -87,11 +87,11 @@ message SnapshotAllResponse {
 message GetLeaderRequest2 {
     required uint32 logicPoolId = 1;
     required uint32 copysetId = 2;
-    optional common.Peer peer = 3; // 可以不指定peer查leader
+    optional common.Peer peer = 3; // The leader can be queried without specifying a peer
 }

 message GetLeaderResponse2 {
-    required common.Peer leader = 1; // 通过peer判空来判断是否返回leader
+    required common.Peer leader = 1; // Check whether this peer is empty to determine whether a leader was returned
 }

 message ResetPeerRequest2 {
diff --git a/src/chunkserver/chunk_closure.cpp b/src/chunkserver/chunk_closure.cpp
index 09b259ae7e..ba547381d8 100644
--- a/src/chunkserver/chunk_closure.cpp
+++ b/src/chunkserver/chunk_closure.cpp
@@ -21,6 +21,7 @@
 */

 #include "src/chunkserver/chunk_closure.h"
+
 #include

 namespace curve {
 namespace chunkserver {

 void ChunkClosure::Run() {
     /**
-     * 在Run结束之后,自动析构自己,这样可以避免
-     * 析构函数漏调
+     * After Run finishes, the closure automatically destructs itself to
+     * prevent the destructor from being missed.
*/ std::unique_ptr selfGuard(this); /** - * 确保done能够被调用,目的是保证rpc一定会返回 + * Ensure that done can be called to ensure that rpc will definitely return */ brpc::ClosureGuard doneGuard(request_->Closure()); /** - * 尽管在request propose给copyset的之前已经 - * 对leader身份进行了确认,但是在copyset处理 - * request的时候,当前copyset的身份还是有可能 - * 变成非leader,所以需要判断ChunkClosure被调 - * 用的时候,request的status,如果 ok,说明是 - * 正常的apply处理,否则将请求转发 + * Although the identity of the leader has been confirmed + * before proposing the request to the copyset, during the + * processing of the request by the copyset, the current + * identity of the copyset may still change to a non-leader. + * Therefore, it is necessary to check the status of the + * request when ChunkClosure is invoked. If it is 'ok', it + * indicates a normal apply processing; otherwise, the + * request should be forwarded. */ if (status().ok()) { return; @@ -61,13 +64,13 @@ void ScanChunkClosure::Run() { case CHUNK_OP_STATUS_CHUNK_NOTEXIST: LOG(WARNING) << "scan chunk failed, read chunk not exist. " << request_->ShortDebugString(); - break; + break; case CHUNK_OP_STATUS_FAILURE_UNKNOWN: LOG(ERROR) << "scan chunk failed, read chunk unknown failure. " << request_->ShortDebugString(); - break; - default: - break; + break; + default: + break; } } diff --git a/src/chunkserver/chunk_closure.h b/src/chunkserver/chunk_closure.h index e2d76b7174..6700527c26 100755 --- a/src/chunkserver/chunk_closure.h +++ b/src/chunkserver/chunk_closure.h @@ -24,20 +24,23 @@ #define SRC_CHUNKSERVER_CHUNK_CLOSURE_H_ #include + #include -#include "src/chunkserver/op_request.h" #include "proto/chunk.pb.h" +#include "src/chunkserver/op_request.h" namespace curve { namespace chunkserver { /** - * 携带op request的所有上下文的closure,通过braft::Task传递给raft处理, - * 调用会有两个地方: - * 1.op request正常的被raft处理,最后on apply的时候会调用返回 - * 2.op request被打包给raft处理之后,但是还没有来得及处理就出错了,例如leader - * step down变为了非leader,那么会明确的提前向client返回错误 + * Carry all the contextual closures of the op request and pass them to the raft + * for processing through the braft::Task, There are two places to call: + * 1. The op request is processed normally by the raft, and will be called and + * returned when it is finally applied + * 2. 
After the op request has been packaged and handed to raft, an error occurs
+ * before it can be processed (for example, the leader steps down and becomes
+ * a non-leader); in that case an error is explicitly returned to the client
+ * in advance
 */
class ChunkClosure : public braft::Closure {
 public:
@@ -49,37 +52,37 @@ class ChunkClosure : public braft::Closure {
     void Run() override;

 public:
-    // 包含了op request 的上下文信息
+    // Contains the context information of the op request
    std::shared_ptr request_;
};

class ScanChunkClosure : public google::protobuf::Closure {
 public:
-    ScanChunkClosure(ChunkRequest *request, ChunkResponse *response) :
-        request_(request), response_(response) {}
+    ScanChunkClosure(ChunkRequest* request, ChunkResponse* response)
+        : request_(request), response_(response) {}

    ~ScanChunkClosure() = default;

    void Run() override;

 public:
-    ChunkRequest *request_;
-    ChunkResponse *response_;
+    ChunkRequest* request_;
+    ChunkResponse* response_;
};

class SendScanMapClosure : public google::protobuf::Closure {
 public:
-    SendScanMapClosure(FollowScanMapRequest * request,
-                       FollowScanMapResponse *response,
-                       uint64_t timeout,
-                       uint32_t retry,
-                       uint64_t retryIntervalUs,
-                       brpc::Controller* cntl,
-                       brpc::Channel *channel) :
-        request_(request), response_(response),
-        rpcTimeoutMs_(timeout), retry_(retry),
-        retryIntervalUs_(retryIntervalUs),
-        cntl_(cntl), channel_(channel) {}
+    SendScanMapClosure(FollowScanMapRequest* request,
+                       FollowScanMapResponse* response, uint64_t timeout,
+                       uint32_t retry, uint64_t retryIntervalUs,
+                       brpc::Controller* cntl, brpc::Channel* channel)
+        : request_(request),
+          response_(response),
+          rpcTimeoutMs_(timeout),
+          retry_(retry),
+          retryIntervalUs_(retryIntervalUs),
+          cntl_(cntl),
+          channel_(channel) {}

    ~SendScanMapClosure() = default;

    void Run() override;

 private:
    void Guard();

 public:
-    FollowScanMapRequest *request_;
-    FollowScanMapResponse *response_;
+    FollowScanMapRequest* request_;
+    FollowScanMapResponse* response_;
    uint64_t rpcTimeoutMs_;
    uint32_t retry_;
    uint64_t retryIntervalUs_;
-    brpc::Controller *cntl_;
-    brpc::Channel *channel_;
+    brpc::Controller* cntl_;
+    brpc::Channel* channel_;
};

}  // namespace chunkserver
diff --git a/src/chunkserver/chunk_service.cpp b/src/chunkserver/chunk_service.cpp
index d6e9034641..85d3d241a5 100755
--- a/src/chunkserver/chunk_service.cpp
+++ b/src/chunkserver/chunk_service.cpp
@@ -22,31 +22,30 @@

 #include "src/chunkserver/chunk_service.h"

-#include
 #include
 #include
+#include

-#include
 #include
+#include
 #include

+#include "include/curve_compiler_specific.h"
+#include "src/chunkserver/chunk_service_closure.h"
+#include "src/chunkserver/chunkserver_metrics.h"
 #include "src/chunkserver/copyset_node.h"
 #include "src/chunkserver/copyset_node_manager.h"
-#include "src/chunkserver/chunkserver_metrics.h"
 #include "src/chunkserver/op_request.h"
-#include "src/chunkserver/chunk_service_closure.h"
 #include "src/common/fast_align.h"

-#include "include/curve_compiler_specific.h"

 namespace curve {
 namespace chunkserver {

 using ::curve::common::is_aligned;

 ChunkServiceImpl::ChunkServiceImpl(
-        const ChunkServiceOptions& chunkServiceOptions,
-        const std::shared_ptr& epochMap)
+    const ChunkServiceOptions& chunkServiceOptions,
+    const std::shared_ptr& epochMap)
    : chunkServiceOptions_(chunkServiceOptions),
      copysetNodeManager_(chunkServiceOptions.copysetNodeManager),
      inflightThrottle_(chunkServiceOptions.inflightThrottle),
      epochMap_(epochMap) {
    maxChunkSize_ =
copysetNodeManager_->GetCopysetNodeOptions().maxChunkSize; } -void ChunkServiceImpl::DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::DeleteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -76,7 +71,7 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -86,24 +81,17 @@ void ChunkServiceImpl::DeleteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::WriteChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -116,11 +104,11 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - brpc::Controller *cntl = dynamic_cast(controller); + brpc::Controller* cntl = dynamic_cast(controller); DVLOG(9) << "Get write I/O request, op: " << request->optype() - << " offset: " << request->offset() - << " size: " << request->size() << " buf header: " - << *(unsigned int *) cntl->request_attachment().to_string().c_str() + << " offset: " << request->offset() << " size: " << request->size() + << " buf header: " + << *(unsigned int*)cntl->request_attachment().to_string().c_str() << " attachement size " << cntl->request_attachment().size(); if (request->has_epoch()) { @@ -134,7 +122,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, } } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "I/O request, op: " << request->optype() @@ -144,7 +132,7 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -154,24 +142,18 @@ void ChunkServiceImpl::WriteChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + response, doneGuard.release()); req->Process(); } -void 
ChunkServiceImpl::CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -184,7 +166,8 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 请求创建的chunk大小和copyset配置的大小不一致 + // The chunk size requested for creation does not match the size configured + // for copyset if (request->size() != maxChunkSize_) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); DVLOG(9) << "Invalid chunk size: " << request->optype() @@ -193,7 +176,7 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -203,19 +186,15 @@ void ChunkServiceImpl::CreateCloneChunk(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared( + nodePtr, controller, request, response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done) { +void ChunkServiceImpl::CreateS3CloneChunk( + RpcController* controller, const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, Closure* done) { (void)controller; (void)request; brpc::ClosureGuard doneGuard(done); @@ -223,15 +202,11 @@ void ChunkServiceImpl::CreateS3CloneChunk(RpcController* controller, LOG(INFO) << "Invalid request, serverSide Not implement yet"; } -void ChunkServiceImpl::ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -244,7 +219,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -254,7 +229,7 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -264,25 +239,17 @@ void ChunkServiceImpl::ReadChunk(RpcController *controller, 
return; } - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::RecoverChunk(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -295,7 +262,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "I/O request, op: " << request->optype() @@ -305,7 +272,7 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -315,26 +282,19 @@ void ChunkServiceImpl::RecoverChunk(RpcController *controller, return; } - // RecoverChunk请求和ReadChunk请求共用ReadChunkRequest - std::shared_ptr req = - std::make_shared(nodePtr, - chunkServiceOptions_.cloneManager, - controller, - request, - response, - doneGuard.release()); + // RecoverChunk request and ReadChunk request share ReadChunkRequest + std::shared_ptr req = std::make_shared( + nodePtr, chunkServiceOptions_.cloneManager, controller, request, + response, doneGuard.release()); req->Process(); } -void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done) { - ChunkServiceClosure* closure = - new (std::nothrow) ChunkServiceClosure(inflightThrottle_, - request, - response, - done); +void ChunkServiceImpl::ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, + Closure* done) { + ChunkServiceClosure* closure = new (std::nothrow) + ChunkServiceClosure(inflightThrottle_, request, response, done); CHECK(nullptr != closure) << "new chunk service closure failed"; brpc::ClosureGuard doneGuard(closure); @@ -347,13 +307,13 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->size())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); return; } - // 判断copyset是否存在 + // Determine if the copyset exists auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), request->copysetid()); if (nullptr == nodePtr) { @@ -363,25 +323,17 @@ void ChunkServiceImpl::ReadChunkSnapshot(RpcController *controller, return; } - std::shared_ptr - req = std::make_shared(nodePtr, - controller, - request, - response, - doneGuard.release()); + std::shared_ptr req = + std::make_shared(nodePtr, controller, request, + 
response, doneGuard.release());
    req->Process();
}

void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
-    RpcController *controller,
-    const ChunkRequest *request,
-    ChunkResponse *response,
-    Closure *done) {
-    ChunkServiceClosure* closure =
-        new (std::nothrow) ChunkServiceClosure(inflightThrottle_,
-                                               request,
-                                               response,
-                                               done);
+    RpcController* controller, const ChunkRequest* request,
+    ChunkResponse* response, Closure* done) {
+    ChunkServiceClosure* closure = new (std::nothrow)
+        ChunkServiceClosure(inflightThrottle_, request, response, done);
    CHECK(nullptr != closure) << "new chunk service closure failed";
    brpc::ClosureGuard doneGuard(closure);
@@ -401,7 +353,7 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
        return;
    }

-    // 判断copyset是否存在
+    // Determine if the copyset exists
    auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
                                                       request->copysetid());
    if (nullptr == nodePtr) {
@@ -412,31 +364,26 @@ void ChunkServiceImpl::DeleteChunkSnapshotOrCorrectSn(
        return;
    }

-    std::shared_ptr
-        req = std::make_shared(nodePtr,
-                               controller,
-                               request,
-                               response,
-                               doneGuard.release());
+    std::shared_ptr req =
+        std::make_shared(nodePtr, controller, request,
+                         response, doneGuard.release());
    req->Process();
}

/**
- * 当前GetChunkInfo在rpc service层定义和Chunk Service分离的,
- * 且其并不经过QoS或者raft一致性协议,所以这里没有让其继承
- * OpRequest或者QoSRequest来重新封装,而是直接原地处理掉了
+ * GetChunkInfo is currently defined at the rpc service layer, separate from
+ * the Chunk Service, and it does not go through QoS or the raft consensus
+ * protocol. Therefore, instead of being re-encapsulated by inheriting from
+ * OpRequest or QoSRequest, it is handled directly in place.
 */
-void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
-                                    const GetChunkInfoRequest *request,
-                                    GetChunkInfoResponse *response,
-                                    Closure *done) {
+void ChunkServiceImpl::GetChunkInfo(RpcController* controller,
+                                    const GetChunkInfoRequest* request,
+                                    GetChunkInfoResponse* response,
+                                    Closure* done) {
    (void)controller;
-    ChunkServiceClosure* closure =
-        new (std::nothrow) ChunkServiceClosure(inflightThrottle_,
-                                               nullptr,
-                                               nullptr,
-                                               done);
+    ChunkServiceClosure* closure = new (std::nothrow)
+        ChunkServiceClosure(inflightThrottle_, nullptr, nullptr, done);
    CHECK(nullptr != closure) << "new chunk service closure failed";
    brpc::ClosureGuard doneGuard(closure);
@@ -449,10 +396,9 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
        return;
    }

-    // 判断copyset是否存在
-    auto nodePtr =
-        copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
-                                            request->copysetid());
+    // Determine if the copyset exists
+    auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(),
+                                                       request->copysetid());
    if (nullptr == nodePtr) {
        response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST);
        LOG(WARNING) << "GetChunkInfo failed, copyset node is not found: "
@@ -460,7 +406,7 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
        return;
    }

-    // 检查任期和自己是不是Leader
+    // Check the term and whether this node is the leader
    if (!nodePtr->IsLeaderTerm()) {
        PeerId leader = nodePtr->GetLeaderId();
        if (!leader.is_empty()) {
@@ -476,16 +422,15 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller,
    ret = nodePtr->GetDataStore()->GetChunkInfo(request->chunkid(), &chunkInfo);

    if (CSErrorCode::Success == ret) {
-        // 1.成功,此时chunk文件肯定存在
+        // 1.
Success, the chunk file must exist at this time response->add_chunksn(chunkInfo.curSn); - if (chunkInfo.snapSn > 0) - response->add_chunksn(chunkInfo.snapSn); + if (chunkInfo.snapSn > 0) response->add_chunksn(chunkInfo.snapSn); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回的版本集合为空 + // 2. Chunk file does not exist, returned version set is empty response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. Other errors LOG(ERROR) << "get chunk info failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -497,14 +442,14 @@ void ChunkServiceImpl::GetChunkInfo(RpcController *controller, } } -void ChunkServiceImpl::GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done) { +void ChunkServiceImpl::GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); - // 判断request参数是否合法 + // Determine whether the request parameter is legal if (!CheckRequestOffsetAndLength(request->offset(), request->length())) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST); LOG(ERROR) << "GetChunkHash illegal parameter:" @@ -517,10 +462,9 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, return; } - // 判断copyset是否存在 - auto nodePtr = - copysetNodeManager_->GetCopysetNode(request->logicpoolid(), - request->copysetid()); + // Determine if the copyset exists + auto nodePtr = copysetNodeManager_->GetCopysetNode(request->logicpoolid(), + request->copysetid()); if (nullptr == nodePtr) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST); LOG(WARNING) << "GetChunkHash failed, copyset node is not found: " @@ -531,21 +475,19 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, CSErrorCode ret; std::string hash; - ret = nodePtr->GetDataStore()->GetChunkHash(request->chunkid(), - request->offset(), - request->length(), - &hash); + ret = nodePtr->GetDataStore()->GetChunkHash( + request->chunkid(), request->offset(), request->length(), &hash); if (CSErrorCode::Success == ret) { - // 1.成功 + // 1. Success response->set_hash(hash); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else if (CSErrorCode::ChunkNotExistError == ret) { - // 2.chunk文件不存在,返回0的hash值 + // 2. Chunk file does not exist, return a hash value of 0 response->set_hash("0"); response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); } else { - // 3.其他错误 + // 3. 
Other errors LOG(ERROR) << "get chunk hash failed, " << " logic pool id: " << request->logicpoolid() << " copyset id: " << request->copysetid() @@ -557,18 +499,17 @@ void ChunkServiceImpl::GetChunkHash(RpcController *controller, } } -void ChunkServiceImpl::UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done) { +void ChunkServiceImpl::UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, + Closure* done) { (void)controller; brpc::ClosureGuard doneGuard(done); bool success = epochMap_->UpdateEpoch(request->fileid(), request->epoch()); if (success) { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS); LOG(INFO) << "Update fileId: " << request->fileid() - << " to epoch: " << request->epoch() - << " success."; + << " to epoch: " << request->epoch() << " success."; } else { response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD); LOG(WARNING) << "Update fileId: " << request->fileid() @@ -579,7 +520,7 @@ void ChunkServiceImpl::UpdateEpoch(RpcController *controller, bool ChunkServiceImpl::CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const { - // 检查offset+len是否越界 + // Check if offset+len is out of range if (CURVE_UNLIKELY(offset + len > maxChunkSize_)) { return false; } diff --git a/src/chunkserver/chunk_service.h b/src/chunkserver/chunk_service.h index e15aea389b..6792c230e1 100755 --- a/src/chunkserver/chunk_service.h +++ b/src/chunkserver/chunk_service.h @@ -23,9 +23,9 @@ #ifndef SRC_CHUNKSERVER_CHUNK_SERVICE_H_ #define SRC_CHUNKSERVER_CHUNK_SERVICE_H_ -#include #include #include +#include #include "proto/chunk.pb.h" #include "src/chunkserver/config_info.h" @@ -34,84 +34,71 @@ namespace curve { namespace chunkserver { -using ::google::protobuf::RpcController; using ::google::protobuf::Closure; +using ::google::protobuf::RpcController; class CopysetNodeManager; class ChunkServiceImpl : public ChunkService { public: explicit ChunkServiceImpl(const ChunkServiceOptions& chunkServiceOptions, - const std::shared_ptr &epochMap); + const std::shared_ptr& epochMap); ~ChunkServiceImpl() {} - void DeleteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void WriteChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void ReadChunkSnapshot(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void DeleteChunkSnapshotOrCorrectSn(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void CreateCloneChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); + void DeleteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void WriteChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void ReadChunkSnapshot(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); + + void DeleteChunkSnapshotOrCorrectSn(RpcController* controller, + const ChunkRequest* request, + ChunkResponse* response, 
Closure* done); + + void CreateCloneChunk(RpcController* controller, + const ChunkRequest* request, ChunkResponse* response, + Closure* done); void CreateS3CloneChunk(RpcController* controller, - const CreateS3CloneChunkRequest* request, - CreateS3CloneChunkResponse* response, - Closure* done); - void RecoverChunk(RpcController *controller, - const ChunkRequest *request, - ChunkResponse *response, - Closure *done); - - void GetChunkInfo(RpcController *controller, - const GetChunkInfoRequest *request, - GetChunkInfoResponse *response, - Closure *done); - - void GetChunkHash(RpcController *controller, - const GetChunkHashRequest *request, - GetChunkHashResponse *response, - Closure *done); - - void UpdateEpoch(RpcController *controller, - const UpdateEpochRequest *request, - UpdateEpochResponse *response, - Closure *done); + const CreateS3CloneChunkRequest* request, + CreateS3CloneChunkResponse* response, + Closure* done); + void RecoverChunk(RpcController* controller, const ChunkRequest* request, + ChunkResponse* response, Closure* done); + + void GetChunkInfo(RpcController* controller, + const GetChunkInfoRequest* request, + GetChunkInfoResponse* response, Closure* done); + + void GetChunkHash(RpcController* controller, + const GetChunkHashRequest* request, + GetChunkHashResponse* response, Closure* done); + + void UpdateEpoch(RpcController* controller, + const UpdateEpochRequest* request, + UpdateEpochResponse* response, Closure* done); private: /** - * 验证op request的offset和length是否越界和对齐 - * @param offset[in]: op request' offset - * @param len[in]: op request' length - * @return true,说明合法,否则返回false + * Validates whether the offset and length of the op request are within + * bounds and aligned. + * @param offset[in]: Offset of the op request. + * @param len[in]: Length of the op request. + * @return true if valid, false otherwise. */ bool CheckRequestOffsetAndLength(uint32_t offset, uint32_t len) const; private: ChunkServiceOptions chunkServiceOptions_; - CopysetNodeManager *copysetNodeManager_; + CopysetNodeManager* copysetNodeManager_; std::shared_ptr inflightThrottle_; - uint32_t maxChunkSize_; + uint32_t maxChunkSize_; std::shared_ptr epochMap_; uint32_t blockSize_; diff --git a/src/chunkserver/chunk_service_closure.cpp b/src/chunkserver/chunk_service_closure.cpp index d680b37d93..32362d04d2 100644 --- a/src/chunkserver/chunk_service_closure.cpp +++ b/src/chunkserver/chunk_service_closure.cpp @@ -21,6 +21,7 @@ */ #include "src/chunkserver/chunk_service_closure.h" + #include #include "src/chunkserver/chunkserver_metrics.h" @@ -30,55 +31,53 @@ namespace chunkserver { void ChunkServiceClosure::Run() { /** - * 在Run结束之后,自动析构自己,这样可以避免 - * 析构函数漏调 + * After the completion of Run, automatically destructs itself to + * avoid missing the destructor call. */ std::unique_ptr selfGuard(this); { - // 所有brpcDone_调用之前要做的操作都放到这个生命周期内 + // All operations to be performed before any brpcDone_ invocation are + // placed within this lifecycle. brpc::ClosureGuard doneGuard(brpcDone_); - // 记录请求处理结果,收集到metric中 + // Record the request processing results and collect them in metric OnResonse(); } - // closure调用的时候减1,closure创建的什么加1 - // 这一行必须放在brpcDone_调用之后,ut里需要测试inflightio超过限制时的表现 - // 会在传进来的closure里面加一个sleep来控制inflightio个数 + // Decrement by 1 when the closure is invoked, and increment by 1 when the + // closure is created. This line must be placed after the brpcDone_ + // invocation. It is necessary to test the behavior when inflightio exceeds + // the limit in unit tests. 
A sleep is added in the provided closure to + // control the number of inflightio. if (nullptr != inflightThrottle_) { inflightThrottle_->Decrement(); } } void ChunkServiceClosure::OnRequest() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is empty, metric will not be counted + if (request_ == nullptr || response_ == nullptr) return; - // 根据request类型统计请求数量 + // Count the number of requests based on their type ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::READ_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::WRITE_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::RECOVER_CHUNK); break; } case CHUNK_OP_TYPE::CHUNK_OP_PASTE: { - metric->OnRequest(request_->logicpoolid(), - request_->copysetid(), + metric->OnRequest(request_->logicpoolid(), request_->copysetid(), CSIOMetricType::PASTE_CHUNK); break; } @@ -88,62 +87,51 @@ void ChunkServiceClosure::OnRequest() { } void ChunkServiceClosure::OnResonse() { - // 如果request或者response为空就不统计metric - if (request_ == nullptr || response_ == nullptr) - return; + // If request or response is empty, metric will not be counted + if (request_ == nullptr || response_ == nullptr) return; - // 可以根据response中的返回值来统计此次请求的处理结果 + // The processing result of this request can be calculated based on the + // return value in the response ChunkServerMetric* metric = ChunkServerMetric::GetInstance(); bool hasError = false; uint64_t latencyUs = common::TimeUtility::GetTimeofDayUs() - receivedTimeUs_; switch (request_->optype()) { case CHUNK_OP_TYPE::CHUNK_OP_READ: { - // 如果是read请求,返回CHUNK_OP_STATUS_CHUNK_NOTEXIST也认为是正确的 - hasError = (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && - (response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); + // For read requests, returning CHUNK_OP_STATUS_CHUNK_NOTEXIST is + // also considered correct + hasError = (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) && + (response_->status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST); - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::READ_CHUNK, - request_->size(), - latencyUs, - hasError); + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::READ_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_WRITE: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - metric->OnResponse(request_->logicpoolid(), - request_->copysetid(), - CSIOMetricType::WRITE_CHUNK, - request_->size(), - latencyUs, - hasError); + hasError = + response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; + metric->OnResponse(request_->logicpoolid(), request_->copysetid(), + CSIOMetricType::WRITE_CHUNK, request_->size(), + latencyUs, hasError); break; } case CHUNK_OP_TYPE::CHUNK_OP_RECOVER: { - hasError = response_->status() - != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS; - 
metric->OnResponse(request_->logicpoolid(),
-                               request_->copysetid(),
-                               CSIOMetricType::RECOVER_CHUNK,
-                               request_->size(),
-                               latencyUs,
-                               hasError);
+            hasError =
+                response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
+            metric->OnResponse(request_->logicpoolid(), request_->copysetid(),
+                               CSIOMetricType::RECOVER_CHUNK, request_->size(),
+                               latencyUs, hasError);
             break;
         }
         case CHUNK_OP_TYPE::CHUNK_OP_PASTE: {
-            hasError = response_->status()
-                       != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
-            metric->OnResponse(request_->logicpoolid(),
-                               request_->copysetid(),
-                               CSIOMetricType::PASTE_CHUNK,
-                               request_->size(),
-                               latencyUs,
-                               hasError);
+            hasError =
+                response_->status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS;
+            metric->OnResponse(request_->logicpoolid(), request_->copysetid(),
+                               CSIOMetricType::PASTE_CHUNK, request_->size(),
+                               latencyUs, hasError);
             break;
         }
         default:
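For reference, the latency bookkeeping used by this closure reduces to a very small pattern. The sketch below is a hypothetical, self-contained reconstruction (SimpleIOMetric and NowUs are illustrative stand-ins, not part of this patch; the real code uses ChunkServerMetric/IOMetric and common::TimeUtility::GetTimeofDayUs): the closure stamps a receive time at construction, and on completion reports latency as "now minus receive time" together with size and error status.

    // Minimal sketch of per-request latency accounting, assuming only
    // atomic counters; the real IOMetric exposes these through bvar.
    #include <atomic>
    #include <chrono>
    #include <cstdint>

    struct SimpleIOMetric {
        std::atomic<uint64_t> errorNum{0};
        std::atomic<uint64_t> ioBytes{0};
        std::atomic<uint64_t> latencyTotalUs{0};

        // One plausible policy (the real one may differ): errors are
        // counted, but do not contribute to byte/latency statistics.
        void OnResponse(size_t size, int64_t latUs, bool hasError) {
            if (hasError) {
                errorNum.fetch_add(1, std::memory_order_relaxed);
                return;
            }
            ioBytes.fetch_add(size, std::memory_order_relaxed);
            latencyTotalUs.fetch_add(latUs, std::memory_order_relaxed);
        }
    };

    // Substitute for GetTimeofDayUs(); steady_clock avoids wall-clock jumps.
    inline uint64_t NowUs() {
        using namespace std::chrono;
        return duration_cast<microseconds>(
                   steady_clock::now().time_since_epoch()).count();
    }

    // At request arrival: uint64_t receivedTimeUs = NowUs();
    // At completion:     metric.OnResponse(size, NowUs() - receivedTimeUs, err);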
diff --git a/src/chunkserver/chunk_service_closure.h b/src/chunkserver/chunk_service_closure.h
index b6dc7d4d65..1d1e79c02a 100755
--- a/src/chunkserver/chunk_service_closure.h
+++ b/src/chunkserver/chunk_service_closure.h
@@ -24,66 +24,71 @@
 #define SRC_CHUNKSERVER_CHUNK_SERVICE_CLOSURE_H_
 
 #include
+
 #include
 
 #include "proto/chunk.pb.h"
-#include "src/chunkserver/op_request.h"
 #include "src/chunkserver/inflight_throttle.h"
+#include "src/chunkserver/op_request.h"
 #include "src/common/timeutility.h"
 
 namespace curve {
 namespace chunkserver {
 
-// chunk service层的闭包,对rpc的闭包再做一层封装,用于请求返回时统计metric信息
+// The closure of the chunk service layer wraps the rpc closure in one more
+// layer; it is used to collect metric information when the request returns
 class ChunkServiceClosure : public braft::Closure {
 public:
    explicit ChunkServiceClosure(
-        std::shared_ptr inflightThrottle,
-        const ChunkRequest *request,
-        ChunkResponse *response,
-        google::protobuf::Closure *done)
-        : inflightThrottle_(inflightThrottle)
-        , request_(request)
-        , response_(response)
-        , brpcDone_(done)
-        , receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) {
-            // closure创建的什么加1,closure调用的时候减1
-            if (nullptr != inflightThrottle_) {
-                inflightThrottle_->Increment();
-            }
-            // 统计请求数量
-            OnRequest();
+        std::shared_ptr inflightThrottle,
+        const ChunkRequest* request, ChunkResponse* response,
+        google::protobuf::Closure* done)
+        : inflightThrottle_(inflightThrottle),
+          request_(request),
+          response_(response),
+          brpcDone_(done),
+          receivedTimeUs_(common::TimeUtility::GetTimeofDayUs()) {
+        // Increment by 1 when the closure is created, and decrement by 1 when
+        // the closure is invoked.
+        if (nullptr != inflightThrottle_) {
+            inflightThrottle_->Increment();
         }
+        // Count the number of requests
+        OnRequest();
+    }
 
     ~ChunkServiceClosure() = default;
 
     /**
-     * 该闭包的guard生命周期结束时会调用该函数
-     * 该函数内目前主要是对读写请求返回结果的一些metric统计
-     * 后面如果有类似的场景(在service请求结束时做一些处理)可以在内部添加逻辑
+     * This function is called when the guard lifecycle of the closure ends.
+     * Currently it mainly collects metrics on the results of read and write
+     * requests. If similar needs arise later (doing some processing when a
+     * service request finishes), the logic can be added here.
      */
     void Run() override;
 
 private:
     /**
-     * 统计请求数量和速率
+     * Count the number and rate of requests
     */
    void OnRequest();
     /**
-     * 记录请求处理的结果,例如请求是否出错、请求的延时等
+     * Record the result of request processing, e.g. whether the request
+     * failed, the request latency, etc.
     */
    void OnResonse();
 
 private:
-    // inflight流控
+    // inflight flow control
     std::shared_ptr inflightThrottle_;
-    // rpc请求的request
-    const ChunkRequest *request_;
-    // rpc请求的response
-    ChunkResponse *response_;
-    // rpc请求回调
-    google::protobuf::Closure *brpcDone_;
-    // 接受到请求的时间
+    // The request of the rpc call
+    const ChunkRequest* request_;
+    // The response of the rpc call
+    ChunkResponse* response_;
+    // Rpc request callback
+    google::protobuf::Closure* brpcDone_;
+    // Time of receiving the request
     uint64_t receivedTimeUs_;
 };
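The Increment-on-create / Decrement-after-done pairing above is the whole inflight-throttling contract, and it is easy to get wrong. The following is a minimal, hypothetical model of it (the real InflightThrottle in src/chunkserver/inflight_throttle.h may differ in API and memory ordering): admission bumps a counter, completion lowers it, and the service can refuse new work once the configured maximum is reached.

    // Minimal sketch of inflight-request throttling, assuming only an
    // atomic counter and a fixed limit.
    #include <atomic>
    #include <cstdint>

    class SimpleInflightThrottle {
     public:
        explicit SimpleInflightThrottle(uint64_t maxInflight)
            : maxInflight_(maxInflight), inflight_(0) {}

        // Called when a closure is created (request admitted).
        void Increment() { inflight_.fetch_add(1, std::memory_order_relaxed); }

        // Called after brpcDone_ has run (request finished).
        void Decrement() { inflight_.fetch_sub(1, std::memory_order_relaxed); }

        // Checked by the service before doing real work.
        bool IsOverloaded() const {
            return inflight_.load(std::memory_order_relaxed) >= maxInflight_;
        }

     private:
        const uint64_t maxInflight_;
        std::atomic<uint64_t> inflight_;
    };

Tying Increment to construction and Decrement to the tail of Run() is what makes the count leak-proof: every admitted request releases its slot exactly once, even on error paths.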
diff --git a/src/chunkserver/chunkserver.cpp b/src/chunkserver/chunkserver.cpp
index 22f302c9da..5a1911dd73 100644
--- a/src/chunkserver/chunkserver.cpp
+++ b/src/chunkserver/chunkserver.cpp
@@ -44,15 +44,15 @@
 #include "src/common/bytes_convert.h"
 #include "src/common/concurrent/task_thread_pool.h"
 #include "src/common/curve_version.h"
-#include "src/common/uri_parser.h"
 #include "src/common/log_util.h"
+#include "src/common/uri_parser.h"
 
+using ::curve::chunkserver::concurrent::ConcurrentApplyModule;
+using ::curve::common::UriParser;
+using ::curve::fs::FileSystemType;
 using ::curve::fs::LocalFileSystem;
 using ::curve::fs::LocalFileSystemOption;
 using ::curve::fs::LocalFsFactory;
-using ::curve::fs::FileSystemType;
-using ::curve::chunkserver::concurrent::ConcurrentApplyModule;
-using ::curve::common::UriParser;
 
 DEFINE_string(conf, "ChunkServer.conf", "Path of configuration file");
 DEFINE_string(chunkServerIp, "127.0.0.1", "chunkserver ip");
@@ -60,19 +60,19 @@ DEFINE_bool(enableExternalServer, false, "start external server or not");
 DEFINE_string(chunkServerExternalIp, "127.0.0.1", "chunkserver external ip");
 DEFINE_int32(chunkServerPort, 8200, "chunkserver port");
 DEFINE_string(chunkServerStoreUri, "local://./0/", "chunkserver store uri");
-DEFINE_string(chunkServerMetaUri,
-    "local://./0/chunkserver.dat", "chunkserver meta uri");
+DEFINE_string(chunkServerMetaUri, "local://./0/chunkserver.dat",
+              "chunkserver meta uri");
 DEFINE_string(copySetUri, "local://./0/copysets", "copyset data uri");
 DEFINE_string(raftSnapshotUri, "curve://./0/copysets", "raft snapshot uri");
 DEFINE_string(raftLogUri, "curve://./0/copysets", "raft log uri");
-DEFINE_string(recycleUri, "local://./0/recycler" , "recycle uri");
+DEFINE_string(recycleUri, "local://./0/recycler", "recycle uri");
 DEFINE_string(chunkFilePoolDir, "./0/", "chunk file pool location");
 DEFINE_int32(chunkFilePoolAllocatedPercent, 80,
              "format percent for chunkfillpool.");
 DEFINE_uint32(chunkFormatThreadNum, 1,
              "number of threads while file pool formatting");
-DEFINE_string(chunkFilePoolMetaPath,
-    "./chunkfilepool.meta", "chunk file pool meta path");
+DEFINE_string(chunkFilePoolMetaPath, "./chunkfilepool.meta",
+              "chunk file pool meta path");
 DEFINE_string(logPath, "./0/chunkserver.log-", "log file path");
 DEFINE_string(mdsListenAddr, "127.0.0.1:6666", "mds listen addr");
 DEFINE_bool(enableChunkfilepool, true, "enable chunkfilepool");
@@ -80,8 +80,7 @@ DEFINE_uint32(copysetLoadConcurrency, 5, "copyset load concurrency");
 DEFINE_bool(enableWalfilepool, true, "enable WAL filepool");
 DEFINE_string(walFilePoolDir, "./0/", "WAL filepool location");
 DEFINE_string(walFilePoolMetaPath, "./walfilepool.meta",
-        "WAL filepool meta path");
-
+              "WAL filepool meta path");
 
 const char* kProtocalCurve = "curve";
 
@@ -93,57 +92,56 @@ int ChunkServer::Run(int argc, char** argv) {
 
     RegisterCurveSegmentLogStorageOrDie();
 
-    // ==========================加载配置项===============================//
+    // =====================Load Configuration Items=======================//
     LOG(INFO) << "Loading Configuration.";
     common::Configuration conf;
     conf.SetConfigPath(FLAGS_conf.c_str());
 
-    // 在从配置文件获取
+    // Load settings from the configuration file
     LOG_IF(FATAL, !conf.LoadConfig())
         << "load chunkserver configuration fail, conf path = "
        << conf.GetConfigPath();
-    // 命令行可以覆盖配置文件中的参数
+    // The command line can override parameters in the configuration file
     LoadConfigFromCmdline(&conf);
 
-    // 初始化日志模块
+    // Initialize the logging module
     curve::common::DisableLoggingToStdErr();
     google::InitGoogleLogging(argv[0]);
 
-    // 打印参数
+    // Print parameters
     conf.PrintConfig();
     curve::common::ExposeCurveVersion();
 
-    // ============================初始化各模块==========================//
+    // ====================Initialize each module======================//
     LOG(INFO) << "Initializing ChunkServer modules";
 
-    // 优先初始化 metric 收集模块
+    // Prioritize initializing the metric collection module
    ChunkServerMetricOptions metricOptions;
     InitMetricOptions(&conf, &metricOptions);
     ChunkServerMetric* metric = ChunkServerMetric::GetInstance();
     LOG_IF(FATAL, metric->Init(metricOptions) != 0)
         << "Failed to init chunkserver metric.";
 
-    // 初始化并发持久模块
+    // Initialize concurrent persistence module
     ConcurrentApplyModule concurrentapply;
     ConcurrentApplyOption concurrentApplyOptions;
     InitConcurrentApplyOptions(&conf, &concurrentApplyOptions);
     LOG_IF(FATAL, false == concurrentapply.Init(concurrentApplyOptions))
         << "Failed to initialize concurrentapply module!";
 
-    // 初始化本地文件系统
+    // Initialize local file system
     std::shared_ptr fs(
         LocalFsFactory::CreateFs(FileSystemType::EXT4, ""));
     LocalFileSystemOption lfsOption;
-    LOG_IF(FATAL, !conf.GetBoolValue(
-        "fs.enable_renameat2", &lfsOption.enableRenameat2));
+    LOG_IF(FATAL, !conf.GetBoolValue("fs.enable_renameat2",
+                                     &lfsOption.enableRenameat2));
     LOG_IF(FATAL, 0 != fs->Init(lfsOption))
         << "Failed to initialize local filesystem module!";
 
-    // 初始化chunk文件池
+    // Initialize chunk file pool
     FilePoolOptions chunkFilePoolOptions;
     InitChunkFilePoolOptions(&conf, &chunkFilePoolOptions);
-    std::shared_ptr chunkfilePool =
-            std::make_shared(fs);
+    std::shared_ptr chunkfilePool = std::make_shared(fs);
     LOG_IF(FATAL, false == chunkfilePool->Initialize(chunkFilePoolOptions))
         << "Failed to init chunk file pool";
@@ -156,9 +154,8 @@ int ChunkServer::Run(int argc, char** argv) {
     bool useChunkFilePoolAsWalPool = true;
     uint32_t useChunkFilePoolAsWalPoolReserve = 15;
     if (raftLogProtocol == kProtocalCurve) {
-        LOG_IF(FATAL, !conf.GetBoolValue(
-            "walfilepool.use_chunk_file_pool",
-            &useChunkFilePoolAsWalPool));
+        LOG_IF(FATAL, !conf.GetBoolValue("walfilepool.use_chunk_file_pool",
+                                         &useChunkFilePoolAsWalPool));
         if (!useChunkFilePoolAsWalPool) {
            FilePoolOptions walFilePoolOptions;
@@ -170,20 +167,20 @@ int ChunkServer::Run(int argc, char** argv) {
         } else {
             walFilePool = chunkfilePool;
             LOG_IF(FATAL, !conf.GetUInt32Value(
-                "walfilepool.use_chunk_file_pool_reserve",
-                &useChunkFilePoolAsWalPoolReserve));
+                              "walfilepool.use_chunk_file_pool_reserve",
+                              &useChunkFilePoolAsWalPoolReserve));
             LOG(INFO) << "initialize to use chunkfilePool as walpool success.";
         }
     }
 
-    // 远端拷贝管理模块选项
+    // Remote copy management module options
     CopyerOptions copyerOptions;
     InitCopyerOptions(&conf, &copyerOptions);
     auto copyer = std::make_shared();
     LOG_IF(FATAL, copyer->Init(copyerOptions) != 0)
         << "Failed to initialize clone copyer.";
 
-    // 克隆管理模块初始化
+    // Clone management module initialization
     CloneOptions cloneOptions;
     InitCloneOptions(&conf, &cloneOptions);
     uint32_t sliceSize;
@@ -195,11 +192,11 @@ int ChunkServer::Run(int argc, char** argv) {
     LOG_IF(FATAL, cloneManager_.Init(cloneOptions) != 0)
         << "Failed to initialize clone manager.";
 
-    // 初始化注册模块
+    // Initialize registration module
     RegisterOptions registerOptions;
     InitRegisterOptions(&conf, &registerOptions);
     registerOptions.useChunkFilePoolAsWalPoolReserve =
-                    useChunkFilePoolAsWalPoolReserve;
+        useChunkFilePoolAsWalPoolReserve;
     registerOptions.useChunkFilePoolAsWalPool = useChunkFilePoolAsWalPool;
     registerOptions.fs = fs;
     registerOptions.chunkFilepool = chunkfilePool;
@@ -208,40 +205,39 @@ int ChunkServer::Run(int argc, char** argv) {
     Register registerMDS(registerOptions);
     ChunkServerMetadata metadata;
     ChunkServerMetadata localMetadata;
-    // 从本地获取meta
-    std::string metaPath = UriParser::GetPathFromUri(
-        registerOptions.chunkserverMetaUri);
+    // Load metadata from local storage
+    std::string metaPath =
+        UriParser::GetPathFromUri(registerOptions.chunkserverMetaUri);
     auto epochMap = std::make_shared();
     if (fs->FileExists(metaPath)) {
         LOG_IF(FATAL, GetChunkServerMetaFromLocal(
-                registerOptions.chunserverStoreUri,
-                registerOptions.chunkserverMetaUri,
-                registerOptions.fs, &localMetadata) != 0)
+                          registerOptions.chunserverStoreUri,
+                          registerOptions.chunkserverMetaUri,
+                          registerOptions.fs, &localMetadata) != 0)
             << "Failed to GetChunkServerMetaFromLocal.";
-        LOG_IF(FATAL, registerMDS.RegisterToMDS(
-            &localMetadata, &metadata, epochMap) != 0)
+        LOG_IF(FATAL, registerMDS.RegisterToMDS(&localMetadata, &metadata,
+                                                epochMap) != 0)
            << "Failed to register to MDS.";
     } else {
-        // 如果本地获取不到,向mds注册
-        LOG(INFO) << "meta file "
-                  << metaPath << " do not exist, register to mds";
-        LOG_IF(FATAL, registerMDS.RegisterToMDS(
-            nullptr, &metadata, epochMap) != 0)
+        // If it cannot be obtained locally, register with MDS
+        LOG(INFO) << "meta file " << metaPath
+                  << " do not exist, register to mds";
+        LOG_IF(FATAL,
+               registerMDS.RegisterToMDS(nullptr, &metadata, epochMap) != 0)
            << "Failed to register to MDS.";
     }
 
-    // trash模块初始化
+    // Trash module initialization
     TrashOptions trashOptions;
     InitTrashOptions(&conf, &trashOptions);
     trashOptions.localFileSystem = fs;
     trashOptions.chunkFilePool = chunkfilePool;
     trashOptions.walPool = walFilePool;
     trash_ = std::make_shared();
-    LOG_IF(FATAL, trash_->Init(trashOptions) != 0)
-        << "Failed to init Trash";
+    LOG_IF(FATAL, trash_->Init(trashOptions) != 0) << "Failed to init Trash";
 
-    // 初始化复制组管理模块
+    // Initialize replication group management module
     CopysetNodeOptions copysetNodeOptions;
     InitCopysetNodeOptions(&conf, &copysetNodeOptions);
     copysetNodeOptions.concurrentapply = &concurrentapply;
@@ -262,23 +258,25 @@ int ChunkServer::Run(int argc, char** argv) {
         }
     }
 
-    // install snapshot的带宽限制
+    // Bandwidth limit for install snapshot
    int snapshotThroughputBytes;
     LOG_IF(FATAL,
            !conf.GetIntValue("chunkserver.snapshot_throttle_throughput_bytes",
                              &snapshotThroughputBytes));
     /**
-     * checkCycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB,
-     * checkCycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
-     * 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
-     * 不是20MB的带宽
+     * checkCycles enables finer-grained bandwidth control. With
+     * snapshotThroughputBytes=100MB and checkCycles=10, it guarantees a
+     * bandwidth of 10MB per 1/10 second, without accumulation: the first
+     * 1/10 second gets 10MB, and any unused part expires, so the second
+     * 1/10 second can still use only 10MB of bandwidth, not 20MB.
     */
     int checkCycles;
     LOG_IF(FATAL,
            !conf.GetIntValue("chunkserver.snapshot_throttle_check_cycles",
                              &checkCycles));
-    scoped_refptr snapshotThrottle
-        = new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles);
+    scoped_refptr snapshotThrottle =
+        new ThroughputSnapshotThrottle(snapshotThroughputBytes, checkCycles);
     snapshotThrottle_ = snapshotThrottle;
     copysetNodeOptions.snapshotThrottle = &snapshotThrottle_;
 
@@ -288,7 +286,7 @@ int ChunkServer::Run(int argc, char** argv) {
         return -1;
     }
     butil::EndPoint endPoint = butil::EndPoint(ip, copysetNodeOptions.port);
-    // 注册curve snapshot storage
+    // Register curve snapshot storage
     RegisterCurveSnapshotStorageOrDie();
     CurveSnapshotStorage::set_server_addr(endPoint);
     copysetNodeManager_ = &CopysetNodeManager::GetInstance();
@@ -302,7 +300,7 @@ int ChunkServer::Run(int argc, char** argv) {
     LOG_IF(FATAL, scanManager_.Init(scanOpts) != 0)
         << "Failed to init scan manager.";
 
-    // 心跳模块初始化
+    // Heartbeat module initialization
     HeartbeatOptions heartbeatOptions;
     InitHeartbeatOptions(&conf, &heartbeatOptions);
     heartbeatOptions.copysetNodeManager = copysetNodeManager_;
@@ -314,7 +312,7 @@ int ChunkServer::Run(int argc, char** argv) {
     LOG_IF(FATAL, heartbeat_.Init(heartbeatOptions) != 0)
         << "Failed to init Heartbeat manager.";
 
-    // 监控部分模块的metric指标
+    // Monitor metrics of some modules
     metric->MonitorTrash(trash_.get());
     metric->MonitorChunkFilePool(chunkfilePool.get());
     if (raftLogProtocol == kProtocalCurve && !useChunkFilePoolAsWalPool) {
@@ -322,8 +320,8 @@ int ChunkServer::Run(int argc, char** argv) {
     }
     metric->ExposeConfigMetric(&conf);
 
-    // ========================添加rpc服务===============================//
-    // TODO(lixiaocui): rpc中各接口添加上延迟metric
+    // =====================Add RPC Service===================== //
+    // TODO(lixiaocui): Add delay metric to each interface in rpc
     brpc::Server server;
     brpc::Server externalServer;
     // We need call braft::add_service to add endPoint to braft::NodeManager
@@ -331,17 +329,16 @@ int ChunkServer::Run(int argc, char** argv) {
 
     // copyset service
     CopysetServiceImpl copysetService(copysetNodeManager_);
-    int ret = server.AddService(&copysetService,
-        brpc::SERVER_DOESNT_OWN_SERVICE);
+    int ret =
+        server.AddService(&copysetService, brpc::SERVER_DOESNT_OWN_SERVICE);
     CHECK(0 == ret) << "Fail to add CopysetService";
 
     // inflight throttle
     int maxInflight;
-    LOG_IF(FATAL,
-           !conf.GetIntValue("chunkserver.max_inflight_requests",
-                             &maxInflight));
-    std::shared_ptr inflightThrottle
-        = std::make_shared(maxInflight);
+    LOG_IF(FATAL, !conf.GetIntValue("chunkserver.max_inflight_requests",
+                                    &maxInflight));
+    std::shared_ptr inflightThrottle =
+        std::make_shared(maxInflight);
    CHECK(nullptr != inflightThrottle) << "new inflight throttle failed";
 
     // chunk service
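The non-accumulating throttle described in the comment above is easier to see with numbers: 100MB/s with checkCycles=10 means each 100ms window carries a 10MB budget, and a window's unused budget expires with the window. The following is a minimal, hypothetical sketch of that scheme (the real ThroughputSnapshotThrottle may differ in interface and locking):

    // Minimal sketch of non-accumulating bandwidth throttling: the
    // per-second budget is split into `checkCycles` windows, each window
    // gets throughputBytes/checkCycles bytes, and leftover quota is
    // discarded instead of carrying over into the next window.
    #include <cstdint>

    class SimpleSnapshotThrottle {
     public:
        SimpleSnapshotThrottle(int64_t throughputBytes, int64_t checkCycles)
            : quotaPerWindow_(throughputBytes / checkCycles),
              windowUs_(1000000 / checkCycles),
              windowStartUs_(0),
              usedInWindow_(0) {}

        // Returns how many of `bytes` may be sent at time `nowUs`.
        int64_t Admit(int64_t nowUs, int64_t bytes) {
            if (nowUs - windowStartUs_ >= windowUs_) {
                // A new 1/checkCycles-second window begins; quota left
                // over from the previous window is intentionally lost.
                windowStartUs_ = nowUs;
                usedInWindow_ = 0;
            }
            int64_t allowed = quotaPerWindow_ - usedInWindow_;
            if (allowed <= 0) return 0;
            int64_t granted = bytes < allowed ? bytes : allowed;
            usedInWindow_ += granted;
            return granted;
        }

     private:
        const int64_t quotaPerWindow_;  // e.g. 100MB/s, 10 cycles -> 10MB
        const int64_t windowUs_;        // window length, e.g. 100000us
        int64_t windowStartUs_;
        int64_t usedInWindow_;
    };

Compared with a token bucket, this design trades smoothness for a hard per-window cap, which keeps install-snapshot traffic from bursting after an idle period.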
@@ -351,8 +348,7 @@ int ChunkServer::Run(int argc, char** argv) {
     chunkServiceOptions.inflightThrottle = inflightThrottle;
 
     ChunkServiceImpl chunkService(chunkServiceOptions, epochMap);
-    ret = server.AddService(&chunkService,
-        brpc::SERVER_DOESNT_OWN_SERVICE);
+    ret = server.AddService(&chunkService, brpc::SERVER_DOESNT_OWN_SERVICE);
     CHECK(0 == ret) << "Fail to add ChunkService";
 
     // We need to replace braft::CliService with our own implementation
@@ -360,14 +356,12 @@ int ChunkServer::Run(int argc, char** argv) {
     ret = server.RemoveService(service);
     CHECK(0 == ret) << "Fail to remove braft::CliService";
     BRaftCliServiceImpl braftCliService;
-    ret = server.AddService(&braftCliService,
-        brpc::SERVER_DOESNT_OWN_SERVICE);
+    ret = server.AddService(&braftCliService, brpc::SERVER_DOESNT_OWN_SERVICE);
     CHECK(0 == ret) << "Fail to add BRaftCliService";
 
     // braftclient service
     BRaftCliServiceImpl2 braftCliService2;
-    ret = server.AddService(&braftCliService2,
-        brpc::SERVER_DOESNT_OWN_SERVICE);
+    ret = server.AddService(&braftCliService2, brpc::SERVER_DOESNT_OWN_SERVICE);
     CHECK(0 == ret) << "Fail to add BRaftCliService2";
 
     // We need to replace braft::FileServiceImpl with our own implementation
@@ -375,51 +369,53 @@ int ChunkServer::Run(int argc, char** argv) {
     ret = server.RemoveService(service);
     CHECK(0 == ret) << "Fail to remove braft::FileService";
     kCurveFileService.set_snapshot_attachment(new CurveSnapshotAttachment(fs));
-    ret = server.AddService(&kCurveFileService,
-        brpc::SERVER_DOESNT_OWN_SERVICE);
+    ret =
+        server.AddService(&kCurveFileService, brpc::SERVER_DOESNT_OWN_SERVICE);
     CHECK(0 == ret) << "Fail to add CurveFileService";
 
     // chunkserver service
     ChunkServerServiceImpl chunkserverService(copysetNodeManager_);
-    ret = server.AddService(&chunkserverService,
-        brpc::SERVER_DOESNT_OWN_SERVICE);
+    ret =
+        server.AddService(&chunkserverService, brpc::SERVER_DOESNT_OWN_SERVICE);
     CHECK(0 == ret) << "Fail to add ChunkServerService";
 
     // scan copyset service
     ScanServiceImpl scanCopysetService(&scanManager_);
-    ret = server.AddService(&scanCopysetService,
-        brpc::SERVER_DOESNT_OWN_SERVICE);
+    ret =
+        server.AddService(&scanCopysetService, brpc::SERVER_DOESNT_OWN_SERVICE);
     CHECK(0 == ret) << "Fail to add ScanCopysetService";
 
-    // 启动rpc service
+    // Start rpc service
     LOG(INFO) << "Internal server is going to serve on: "
               << copysetNodeOptions.ip << ":" << copysetNodeOptions.port;
     if (server.Start(endPoint, NULL) != 0) {
         LOG(ERROR) << "Fail to start Internal Server";
         return -1;
     }
-    /* 启动external server
-       external server用于向client和工具等外部提供服务
-       区别于mds和chunkserver之间的通信*/
+    /* Start the external server.
+       The external server provides services to external clients and tools,
+       as opposed to the internal server used for communication between MDS
+       and chunkserver. */
     if (registerOptions.enableExternalServer) {
         ret = externalServer.AddService(&copysetService,
-            brpc::SERVER_DOESNT_OWN_SERVICE);
+                                        brpc::SERVER_DOESNT_OWN_SERVICE);
         CHECK(0 == ret) << "Fail to add CopysetService at external server";
         ret = externalServer.AddService(&chunkService,
-            brpc::SERVER_DOESNT_OWN_SERVICE);
+                                        brpc::SERVER_DOESNT_OWN_SERVICE);
         CHECK(0 == ret) << "Fail to add ChunkService at external server";
         ret = externalServer.AddService(&braftCliService,
-            brpc::SERVER_DOESNT_OWN_SERVICE);
+                                        brpc::SERVER_DOESNT_OWN_SERVICE);
         CHECK(0 == ret) << "Fail to add BRaftCliService at external server";
         ret = externalServer.AddService(&braftCliService2,
-            brpc::SERVER_DOESNT_OWN_SERVICE);
+                                        brpc::SERVER_DOESNT_OWN_SERVICE);
         CHECK(0 == ret) << "Fail to add BRaftCliService2 at external server";
         braft::RaftStatImpl raftStatService;
        ret = externalServer.AddService(&raftStatService,
-            brpc::SERVER_DOESNT_OWN_SERVICE);
+                                        brpc::SERVER_DOESNT_OWN_SERVICE);
         CHECK(0 == ret) << "Fail to add RaftStatService at external server";
-        std::string externalAddr = registerOptions.chunkserverExternalIp + ":" +
-            std::to_string(registerOptions.chunkserverPort);
+        std::string externalAddr =
+            registerOptions.chunkserverExternalIp + ":" +
+            std::to_string(registerOptions.chunkserverPort);
         LOG(INFO) << "External server is going to serve on: " << externalAddr;
         if (externalServer.Start(externalAddr.c_str(), NULL) != 0) {
             LOG(ERROR) << "Fail to start External Server";
@@ -427,30 +423,29 @@ int ChunkServer::Run(int argc, char** argv) {
         }
     }
 
-    // =======================启动各模块==================================//
+    // ===============Start each module=============== //
     LOG(INFO) << "ChunkServer starts.";
     /**
-     * 将模块启动放到rpc 服务启动后面,主要是为了解决内存增长的问题
-     * 控制并发恢复的copyset数量,copyset恢复需要依赖rpc服务先启动
+     * Module startup is placed after the RPC service starts mainly to
+     * address memory growth: it limits the number of copysets recovered
+     * concurrently, and copyset recovery requires the RPC service to be
+     * running first.
     */
-    LOG_IF(FATAL, trash_->Run() != 0)
-        << "Failed to start trash.";
-    LOG_IF(FATAL, cloneManager_.Run() != 0)
-        << "Failed to start clone manager.";
+    LOG_IF(FATAL, trash_->Run() != 0) << "Failed to start trash.";
+    LOG_IF(FATAL, cloneManager_.Run() != 0) << "Failed to start clone manager.";
     LOG_IF(FATAL, heartbeat_.Run() != 0)
         << "Failed to start heartbeat manager.";
     LOG_IF(FATAL, copysetNodeManager_->Run() != 0)
         << "Failed to start CopysetNodeManager.";
-    LOG_IF(FATAL, scanManager_.Run() != 0)
-        << "Failed to start scan manager.";
+    LOG_IF(FATAL, scanManager_.Run() != 0) << "Failed to start scan manager.";
     LOG_IF(FATAL, !chunkfilePool->StartCleaning())
        << "Failed to start file pool clean worker.";
 
-    // =======================等待进程退出==================================//
+    // ===============Wait for the process to exit=============== //
     while (!brpc::IsAskedToQuit()) {
         bthread_usleep(1000000L);
     }
-    // scanmanager stop maybe need a little while, so stop it first before stop service NOLINT
+    // scan manager stop may need a little while,
+    // so stop it first before stopping services
     LOG(INFO) << "ChunkServer is going to quit.";
     LOG_IF(ERROR, scanManager_.Fini() != 0)
         << "Failed to shutdown scan manager.";
@@ -469,10 +464,8 @@ int ChunkServer::Run(int argc, char** argv) {
         << "Failed to shutdown CopysetNodeManager.";
     LOG_IF(ERROR, cloneManager_.Fini() != 0)
         << "Failed to shutdown clone manager.";
-    LOG_IF(ERROR, copyer->Fini() != 0)
-        << "Failed to shutdown clone copyer.";
-    LOG_IF(ERROR, trash_->Fini() != 0)
-        << "Failed to shutdown trash.";
+    LOG_IF(ERROR, copyer->Fini() != 0) << "Failed to shutdown clone copyer.";
+    LOG_IF(ERROR, trash_->Fini() != 0) << "Failed to shutdown trash.";
     LOG_IF(ERROR, !chunkfilePool->StopCleaning())
         << "Failed to shutdown file pool clean worker.";
     concurrentapply.Stop();
@@ -481,14 +474,12 @@ int ChunkServer::Run(int argc, char** argv) {
     return 0;
 }
 
-void ChunkServer::Stop() {
-    brpc::AskToQuit();
-}
+void ChunkServer::Stop() { brpc::AskToQuit(); }
 
 void ChunkServer::InitChunkFilePoolOptions(
-    common::Configuration *conf, FilePoolOptions *chunkFilePoolOptions) {
+    common::Configuration* conf, FilePoolOptions* chunkFilePoolOptions) {
     LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size",
-        &chunkFilePoolOptions->fileSize));
+                                        &chunkFilePoolOptions->fileSize));
     LOG_IF(FATAL,
!conf->GetUInt32Value("global.meta_page_size", &chunkFilePoolOptions->metaPageSize)) @@ -499,24 +490,23 @@ void ChunkServer::InitChunkFilePoolOptions( << "Not found `global.block_size` in config file"; LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.cpmeta_file_size", - &chunkFilePoolOptions->metaFileSize)); - LOG_IF(FATAL, !conf->GetBoolValue( - "chunkfilepool.enable_get_chunk_from_pool", - &chunkFilePoolOptions->getFileFromPool)); + &chunkFilePoolOptions->metaFileSize)); + LOG_IF(FATAL, + !conf->GetBoolValue("chunkfilepool.enable_get_chunk_from_pool", + &chunkFilePoolOptions->getFileFromPool)); if (chunkFilePoolOptions->getFileFromPool == false) { std::string chunkFilePoolUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.chunk_file_pool_dir", &chunkFilePoolUri)); - ::memcpy(chunkFilePoolOptions->filePoolDir, - chunkFilePoolUri.c_str(), + LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", + &chunkFilePoolUri)); + ::memcpy(chunkFilePoolOptions->filePoolDir, chunkFilePoolUri.c_str(), chunkFilePoolUri.size()); } else { std::string metaUri; - LOG_IF(FATAL, !conf->GetStringValue( - "chunkfilepool.meta_path", &metaUri)); - ::memcpy( - chunkFilePoolOptions->metaPath, metaUri.c_str(), metaUri.size()); + LOG_IF(FATAL, + !conf->GetStringValue("chunkfilepool.meta_path", &metaUri)); + ::memcpy(chunkFilePoolOptions->metaPath, metaUri.c_str(), + metaUri.size()); std::string chunkFilePoolUri; LOG_IF(FATAL, !conf->GetStringValue("chunkfilepool.chunk_file_pool_dir", @@ -539,12 +529,12 @@ void ChunkServer::InitChunkFilePoolOptions( "chunkfilepool.chunk_file_pool_format_thread_num", &chunkFilePoolOptions->formatThreadNum)); LOG_IF(FATAL, !conf->GetBoolValue("chunkfilepool.clean.enable", - &chunkFilePoolOptions->needClean)); + &chunkFilePoolOptions->needClean)); LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.bytes_per_write", &chunkFilePoolOptions->bytesPerWrite)); LOG_IF(FATAL, !conf->GetUInt32Value("chunkfilepool.clean.throttle_iops", - &chunkFilePoolOptions->iops4clean)); + &chunkFilePoolOptions->iops4clean)); std::string copysetUri; LOG_IF(FATAL, @@ -567,9 +557,9 @@ void ChunkServer::InitChunkFilePoolOptions( (useChunkFilePoolAsWalPool && Trash::IsWALFile(filename)); }; - if (0 == chunkFilePoolOptions->bytesPerWrite - || chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 - || 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { + if (0 == chunkFilePoolOptions->bytesPerWrite || + chunkFilePoolOptions->bytesPerWrite > 1 * 1024 * 1024 || + 0 != chunkFilePoolOptions->bytesPerWrite % 4096) { LOG(FATAL) << "The bytesPerWrite must be in [1, 1048576] " << "and should be aligned to 4K, " << "but now is: " << chunkFilePoolOptions->bytesPerWrite; @@ -577,41 +567,40 @@ void ChunkServer::InitChunkFilePoolOptions( } } -void ChunkServer::InitConcurrentApplyOptions(common::Configuration *conf, - ConcurrentApplyOption *concurrentApplyOptions) { - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.size", &concurrentApplyOptions->rconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.size", &concurrentApplyOptions->wconcurrentsize)); - LOG_IF(FATAL, !conf->GetIntValue( - "rconcurrentapply.queuedepth", &concurrentApplyOptions->rqueuedepth)); - LOG_IF(FATAL, !conf->GetIntValue( - "wconcurrentapply.queuedepth", &concurrentApplyOptions->wqueuedepth)); +void ChunkServer::InitConcurrentApplyOptions( + common::Configuration* conf, + ConcurrentApplyOption* concurrentApplyOptions) { + LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.size", + 
&concurrentApplyOptions->rconcurrentsize));
+    LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.size",
+                                     &concurrentApplyOptions->wconcurrentsize));
+    LOG_IF(FATAL, !conf->GetIntValue("rconcurrentapply.queuedepth",
+                                     &concurrentApplyOptions->rqueuedepth));
+    LOG_IF(FATAL, !conf->GetIntValue("wconcurrentapply.queuedepth",
+                                     &concurrentApplyOptions->wqueuedepth));
 }
 
-void ChunkServer::InitWalFilePoolOptions(
-    common::Configuration *conf, FilePoolOptions *walPoolOptions) {
+void ChunkServer::InitWalFilePoolOptions(common::Configuration* conf,
+                                         FilePoolOptions* walPoolOptions) {
     LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.segment_size",
-        &walPoolOptions->fileSize));
+                                        &walPoolOptions->fileSize));
     LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.metapage_size",
-        &walPoolOptions->metaPageSize));
+                                        &walPoolOptions->metaPageSize));
     LOG_IF(FATAL, !conf->GetUInt32Value("walfilepool.meta_file_size",
-        &walPoolOptions->metaFileSize));
-    LOG_IF(FATAL, !conf->GetBoolValue(
-        "walfilepool.enable_get_segment_from_pool",
-        &walPoolOptions->getFileFromPool));
+                                        &walPoolOptions->metaFileSize));
+    LOG_IF(FATAL,
+           !conf->GetBoolValue("walfilepool.enable_get_segment_from_pool",
+                               &walPoolOptions->getFileFromPool));
 
     if (walPoolOptions->getFileFromPool == false) {
         std::string filePoolUri;
-        LOG_IF(FATAL, !conf->GetStringValue(
-            "walfilepool.file_pool_dir", &filePoolUri));
-        ::memcpy(walPoolOptions->filePoolDir,
-                 filePoolUri.c_str(),
+        LOG_IF(FATAL, !conf->GetStringValue("walfilepool.file_pool_dir",
+                                            &filePoolUri));
+        ::memcpy(walPoolOptions->filePoolDir, filePoolUri.c_str(),
                  filePoolUri.size());
     } else {
         std::string metaUri;
-        LOG_IF(FATAL, !conf->GetStringValue(
-            "walfilepool.meta_path", &metaUri));
+        LOG_IF(FATAL, !conf->GetStringValue("walfilepool.meta_path", &metaUri));
 
         std::string pool_size;
         LOG_IF(FATAL, !conf->GetStringValue("walfilepool.chunk_file_pool_size",
@@ -642,94 +631,98 @@ void ChunkServer::InitWalFilePoolOptions(
         walPoolOptions->isAllocated = [](const string& filename) {
             return Trash::IsWALFile(filename);
         };
-        ::memcpy(
-            walPoolOptions->metaPath, metaUri.c_str(), metaUri.size());
+        ::memcpy(walPoolOptions->metaPath, metaUri.c_str(), metaUri.size());
     }
 }
 
 void ChunkServer::InitCopysetNodeOptions(
-    common::Configuration *conf, CopysetNodeOptions *copysetNodeOptions) {
+    common::Configuration* conf, CopysetNodeOptions* copysetNodeOptions) {
     LOG_IF(FATAL, !conf->GetStringValue("global.ip", &copysetNodeOptions->ip));
-    LOG_IF(FATAL, !conf->GetUInt32Value(
-        "global.port", &copysetNodeOptions->port));
+    LOG_IF(FATAL,
+           !conf->GetUInt32Value("global.port", &copysetNodeOptions->port));
     if (copysetNodeOptions->port <= 0 || copysetNodeOptions->port >= 65535) {
         LOG(FATAL) << "Invalid server port provided: "
                    << copysetNodeOptions->port;
     }
 
     LOG_IF(FATAL, !conf->GetIntValue("copyset.election_timeout_ms",
-        &copysetNodeOptions->electionTimeoutMs));
+                                     &copysetNodeOptions->electionTimeoutMs));
     LOG_IF(FATAL, !conf->GetIntValue("copyset.snapshot_interval_s",
-        &copysetNodeOptions->snapshotIntervalS));
+                                     &copysetNodeOptions->snapshotIntervalS));
     bool ret = conf->GetBoolValue("copyset.enable_lease_read",
-        &copysetNodeOptions->enbaleLeaseRead);
+                                  &copysetNodeOptions->enbaleLeaseRead);
     LOG_IF(WARNING, ret == false)
         << "config no copyset.enable_lease_read info, using default value "
        << copysetNodeOptions->enbaleLeaseRead;
     LOG_IF(FATAL, !conf->GetIntValue("copyset.catchup_margin",
-        &copysetNodeOptions->catchupMargin));
+                                     &copysetNodeOptions->catchupMargin));
    LOG_IF(FATAL, !conf->GetStringValue("copyset.chunk_data_uri",
-        &copysetNodeOptions->chunkDataUri));
+                                        &copysetNodeOptions->chunkDataUri));
     LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_log_uri",
-        &copysetNodeOptions->logUri));
+                                        &copysetNodeOptions->logUri));
     LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_meta_uri",
-        &copysetNodeOptions->raftMetaUri));
+                                        &copysetNodeOptions->raftMetaUri));
     LOG_IF(FATAL, !conf->GetStringValue("copyset.raft_snapshot_uri",
-        &copysetNodeOptions->raftSnapshotUri));
+                                        &copysetNodeOptions->raftSnapshotUri));
     LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri",
-        &copysetNodeOptions->recyclerUri));
+                                        &copysetNodeOptions->recyclerUri));
     LOG_IF(FATAL, !conf->GetUInt32Value("global.chunk_size",
-        &copysetNodeOptions->maxChunkSize));
+                                        &copysetNodeOptions->maxChunkSize));
     LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size",
-        &copysetNodeOptions->metaPageSize));
+                                        &copysetNodeOptions->metaPageSize));
     LOG_IF(FATAL, !conf->GetUInt32Value("global.block_size",
-        &copysetNodeOptions->blockSize));
+                                        &copysetNodeOptions->blockSize));
    LOG_IF(FATAL, !conf->GetUInt32Value("global.location_limit",
-        &copysetNodeOptions->locationLimit));
+                                        &copysetNodeOptions->locationLimit));
     LOG_IF(FATAL, !conf->GetUInt32Value("copyset.load_concurrency",
-        &copysetNodeOptions->loadConcurrency));
+                                        &copysetNodeOptions->loadConcurrency));
     LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_retrytimes",
-        &copysetNodeOptions->checkRetryTimes));
+                                        &copysetNodeOptions->checkRetryTimes));
     LOG_IF(FATAL, !conf->GetUInt32Value("copyset.finishload_margin",
-        &copysetNodeOptions->finishLoadMargin));
-    LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_loadmargin_interval_ms",
-        &copysetNodeOptions->checkLoadMarginIntervalMs));
+                                        &copysetNodeOptions->finishLoadMargin));
+    LOG_IF(FATAL, !conf->GetUInt32Value(
+                      "copyset.check_loadmargin_interval_ms",
+                      &copysetNodeOptions->checkLoadMarginIntervalMs));
     LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_concurrency",
-        &copysetNodeOptions->syncConcurrency));
+                                        &copysetNodeOptions->syncConcurrency));
     LOG_IF(FATAL, !conf->GetBoolValue(
-        "copyset.enable_odsync_when_open_chunkfile",
-        &copysetNodeOptions->enableOdsyncWhenOpenChunkFile));
+                      "copyset.enable_odsync_when_open_chunkfile",
+                      &copysetNodeOptions->enableOdsyncWhenOpenChunkFile));
     if (!copysetNodeOptions->enableOdsyncWhenOpenChunkFile) {
-        LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_chunk_limits",
-            &copysetNodeOptions->syncChunkLimit));
-        LOG_IF(FATAL, !conf->GetUInt64Value("copyset.sync_threshold",
-            &copysetNodeOptions->syncThreshold));
-        LOG_IF(FATAL, !conf->GetUInt32Value("copyset.check_syncing_interval_ms",
-            &copysetNodeOptions->checkSyncingIntervalMs));
-        LOG_IF(FATAL, !conf->GetUInt32Value("copyset.sync_trigger_seconds",
-            &copysetNodeOptions->syncTriggerSeconds));
+        LOG_IF(FATAL,
+               !conf->GetUInt64Value("copyset.sync_chunk_limits",
+                                     &copysetNodeOptions->syncChunkLimit));
+        LOG_IF(FATAL,
+               !conf->GetUInt64Value("copyset.sync_threshold",
+                                     &copysetNodeOptions->syncThreshold));
+        LOG_IF(FATAL, !conf->GetUInt32Value(
+                          "copyset.check_syncing_interval_ms",
+                          &copysetNodeOptions->checkSyncingIntervalMs));
+        LOG_IF(FATAL,
+               !conf->GetUInt32Value("copyset.sync_trigger_seconds",
+                                     &copysetNodeOptions->syncTriggerSeconds));
    }
 }
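Every option read above follows the same fail-fast idiom: fetch a key and abort startup via LOG_IF(FATAL, ...) if it is missing, rather than running with silent defaults. A small hypothetical helper shows the idiom in isolation (MustGetUInt32 is illustrative only; the real code calls Configuration::GetUInt32Value and friends directly):

    // Hypothetical wrapper around the fail-fast config-read pattern above.
    // Assumes a Configuration-like type exposing
    // GetUInt32Value(key, out) -> bool, as the patch does.
    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    template <typename Conf>
    uint32_t MustGetUInt32(Conf* conf, const std::string& key) {
        uint32_t value = 0;
        // Mirrors LOG_IF(FATAL, !conf->GetUInt32Value(key, &value)):
        // a missing or malformed key is a deployment error, so the
        // process refuses to start instead of guessing a default.
        if (!conf->GetUInt32Value(key, &value)) {
            fprintf(stderr, "missing config key: %s\n", key.c_str());
            abort();
        }
        return value;
    }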
LOG_IF(FATAL, !conf->GetStringValue("curve.config_path", - ©erOptions->curveConf)); + ©erOptions->curveConf)); LOG_IF(FATAL, - !conf->GetStringValue("s3.config_path", ©erOptions->s3Conf)); + !conf->GetStringValue("s3.config_path", ©erOptions->s3Conf)); bool disableCurveClient = false; bool disableS3Adapter = false; LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_curve_client", - &disableCurveClient)); - LOG_IF(FATAL, !conf->GetBoolValue("clone.disable_s3_adapter", - &disableS3Adapter)); + &disableCurveClient)); + LOG_IF(FATAL, + !conf->GetBoolValue("clone.disable_s3_adapter", &disableS3Adapter)); LOG_IF(FATAL, !conf->GetUInt64Value("curve.curve_file_timeout_s", - ©erOptions->curveFileTimeoutSec)); + ©erOptions->curveFileTimeoutSec)); if (disableCurveClient) { copyerOptions->curveClient = nullptr; @@ -744,105 +737,105 @@ void ChunkServer::InitCopyerOptions( } } -void ChunkServer::InitCloneOptions( - common::Configuration *conf, CloneOptions *cloneOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value("clone.thread_num", - &cloneOptions->threadNum)); +void ChunkServer::InitCloneOptions(common::Configuration* conf, + CloneOptions* cloneOptions) { + LOG_IF(FATAL, + !conf->GetUInt32Value("clone.thread_num", &cloneOptions->threadNum)); LOG_IF(FATAL, !conf->GetUInt32Value("clone.queue_depth", - &cloneOptions->queueCapacity)); + &cloneOptions->queueCapacity)); } -void ChunkServer::InitScanOptions( - common::Configuration *conf, ScanManagerOptions *scanOptions) { +void ChunkServer::InitScanOptions(common::Configuration* conf, + ScanManagerOptions* scanOptions) { LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_interval_sec", - &scanOptions->intervalSec)); + &scanOptions->intervalSec)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_size_byte", - &scanOptions->scanSize)); + &scanOptions->scanSize)); LOG_IF(FATAL, !conf->GetUInt32Value("global.meta_page_size", - &scanOptions->chunkMetaPageSize)); + &scanOptions->chunkMetaPageSize)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_timeout_ms", - &scanOptions->timeoutMs)); + &scanOptions->timeoutMs)); LOG_IF(FATAL, !conf->GetUInt32Value("copyset.scan_rpc_retry_times", - &scanOptions->retry)); + &scanOptions->retry)); LOG_IF(FATAL, !conf->GetUInt64Value("copyset.scan_rpc_retry_interval_us", - &scanOptions->retryIntervalUs)); + &scanOptions->retryIntervalUs)); } -void ChunkServer::InitHeartbeatOptions( - common::Configuration *conf, HeartbeatOptions *heartbeatOptions) { +void ChunkServer::InitHeartbeatOptions(common::Configuration* conf, + HeartbeatOptions* heartbeatOptions) { LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - &heartbeatOptions->storeUri)); + &heartbeatOptions->storeUri)); LOG_IF(FATAL, !conf->GetStringValue("global.ip", &heartbeatOptions->ip)); - LOG_IF(FATAL, !conf->GetUInt32Value("global.port", - &heartbeatOptions->port)); + LOG_IF(FATAL, + !conf->GetUInt32Value("global.port", &heartbeatOptions->port)); LOG_IF(FATAL, !conf->GetStringValue("mds.listen.addr", - &heartbeatOptions->mdsListenAddr)); + &heartbeatOptions->mdsListenAddr)); LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_interval", - &heartbeatOptions->intervalSec)); + &heartbeatOptions->intervalSec)); LOG_IF(FATAL, !conf->GetUInt32Value("mds.heartbeat_timeout", - &heartbeatOptions->timeout)); + &heartbeatOptions->timeout)); } -void ChunkServer::InitRegisterOptions( - common::Configuration *conf, RegisterOptions *registerOptions) { +void ChunkServer::InitRegisterOptions(common::Configuration* conf, + RegisterOptions* registerOptions) { LOG_IF(FATAL, 
!conf->GetStringValue("mds.listen.addr", - ®isterOptions->mdsListenAddr)); - LOG_IF(FATAL, !conf->GetStringValue("global.ip", - ®isterOptions->chunkserverInternalIp)); + ®isterOptions->mdsListenAddr)); + LOG_IF(FATAL, !conf->GetStringValue( + "global.ip", ®isterOptions->chunkserverInternalIp)); LOG_IF(FATAL, !conf->GetBoolValue("global.enable_external_server", - ®isterOptions->enableExternalServer)); - LOG_IF(FATAL, !conf->GetStringValue("global.external_ip", - ®isterOptions->chunkserverExternalIp)); + ®isterOptions->enableExternalServer)); + LOG_IF(FATAL, + !conf->GetStringValue("global.external_ip", + ®isterOptions->chunkserverExternalIp)); LOG_IF(FATAL, !conf->GetIntValue("global.port", - ®isterOptions->chunkserverPort)); + ®isterOptions->chunkserverPort)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.stor_uri", - ®isterOptions->chunserverStoreUri)); + ®isterOptions->chunserverStoreUri)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.meta_uri", - ®isterOptions->chunkserverMetaUri)); + ®isterOptions->chunkserverMetaUri)); LOG_IF(FATAL, !conf->GetStringValue("chunkserver.disk_type", - ®isterOptions->chunkserverDiskType)); + ®isterOptions->chunkserverDiskType)); LOG_IF(FATAL, !conf->GetIntValue("mds.register_retries", - ®isterOptions->registerRetries)); + ®isterOptions->registerRetries)); LOG_IF(FATAL, !conf->GetIntValue("mds.register_timeout", - ®isterOptions->registerTimeout)); + ®isterOptions->registerTimeout)); } -void ChunkServer::InitTrashOptions( - common::Configuration *conf, TrashOptions *trashOptions) { - LOG_IF(FATAL, !conf->GetStringValue( - "copyset.recycler_uri", &trashOptions->trashPath)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.expire_afterSec", &trashOptions->expiredAfterSec)); - LOG_IF(FATAL, !conf->GetIntValue( - "trash.scan_periodSec", &trashOptions->scanPeriodSec)); +void ChunkServer::InitTrashOptions(common::Configuration* conf, + TrashOptions* trashOptions) { + LOG_IF(FATAL, !conf->GetStringValue("copyset.recycler_uri", + &trashOptions->trashPath)); + LOG_IF(FATAL, !conf->GetIntValue("trash.expire_afterSec", + &trashOptions->expiredAfterSec)); + LOG_IF(FATAL, !conf->GetIntValue("trash.scan_periodSec", + &trashOptions->scanPeriodSec)); } -void ChunkServer::InitMetricOptions( - common::Configuration *conf, ChunkServerMetricOptions *metricOptions) { - LOG_IF(FATAL, !conf->GetUInt32Value( - "global.port", &metricOptions->port)); - LOG_IF(FATAL, !conf->GetStringValue( - "global.ip", &metricOptions->ip)); - LOG_IF(FATAL, !conf->GetBoolValue( - "metric.onoff", &metricOptions->collectMetric)); +void ChunkServer::InitMetricOptions(common::Configuration* conf, + ChunkServerMetricOptions* metricOptions) { + LOG_IF(FATAL, !conf->GetUInt32Value("global.port", &metricOptions->port)); + LOG_IF(FATAL, !conf->GetStringValue("global.ip", &metricOptions->ip)); + LOG_IF(FATAL, + !conf->GetBoolValue("metric.onoff", &metricOptions->collectMetric)); } -void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { - // 如果命令行有设置, 命令行覆盖配置文件中的字段 +void ChunkServer::LoadConfigFromCmdline(common::Configuration* conf) { + // If there are settings on the command line, the command line overwrites + // the fields in the configuration file google::CommandLineFlagInfo info; if (GetCommandLineFlagInfo("chunkServerIp", &info) && !info.is_default) { conf->SetStringValue("global.ip", FLAGS_chunkServerIp); } else { LOG(FATAL) - << "chunkServerIp must be set when run chunkserver in command."; + << "chunkServerIp must be set when run chunkserver in command."; } if 
(GetCommandLineFlagInfo("enableExternalServer", &info) && - !info.is_default) { - conf->SetBoolValue( - "global.enable_external_server", FLAGS_enableExternalServer); + !info.is_default) { + conf->SetBoolValue("global.enable_external_server", + FLAGS_enableExternalServer); } if (GetCommandLineFlagInfo("chunkServerExternalIp", &info) && - !info.is_default) { + !info.is_default) { conf->SetStringValue("global.external_ip", FLAGS_chunkServerExternalIp); } @@ -850,23 +843,23 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { conf->SetIntValue("global.port", FLAGS_chunkServerPort); } else { LOG(FATAL) - << "chunkServerPort must be set when run chunkserver in command."; + << "chunkServerPort must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("chunkServerStoreUri", &info) && !info.is_default) { conf->SetStringValue("chunkserver.stor_uri", FLAGS_chunkServerStoreUri); } else { - LOG(FATAL) - << "chunkServerStoreUri must be set when run chunkserver in command."; + LOG(FATAL) << "chunkServerStoreUri must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("chunkServerMetaUri", &info) && !info.is_default) { conf->SetStringValue("chunkserver.meta_uri", FLAGS_chunkServerMetaUri); } else { - LOG(FATAL) - << "chunkServerMetaUri must be set when run chunkserver in command."; + LOG(FATAL) << "chunkServerMetaUri must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("copySetUri", &info) && !info.is_default) { @@ -875,39 +868,33 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { conf->SetStringValue("copyset.raft_snapshot_uri", FLAGS_copySetUri); conf->SetStringValue("copyset.raft_meta_uri", FLAGS_copySetUri); } else { - LOG(FATAL) - << "copySetUri must be set when run chunkserver in command."; + LOG(FATAL) << "copySetUri must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("raftSnapshotUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_snapshot_uri", FLAGS_raftSnapshotUri); + conf->SetStringValue("copyset.raft_snapshot_uri", + FLAGS_raftSnapshotUri); } else { LOG(FATAL) - << "raftSnapshotUri must be set when run chunkserver in command."; + << "raftSnapshotUri must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("raftLogUri", &info) && !info.is_default) { - conf->SetStringValue( - "copyset.raft_log_uri", FLAGS_raftLogUri); + conf->SetStringValue("copyset.raft_log_uri", FLAGS_raftLogUri); } else { - LOG(FATAL) - << "raftLogUri must be set when run chunkserver in command."; + LOG(FATAL) << "raftLogUri must be set when run chunkserver in command."; } - if (GetCommandLineFlagInfo("recycleUri", &info) && - !info.is_default) { + if (GetCommandLineFlagInfo("recycleUri", &info) && !info.is_default) { conf->SetStringValue("copyset.recycler_uri", FLAGS_recycleUri); } else { - LOG(FATAL) - << "recycleUri must be set when run chunkserver in command."; + LOG(FATAL) << "recycleUri must be set when run chunkserver in command."; } - if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "chunkfilepool.chunk_file_pool_dir", FLAGS_chunkFilePoolDir); + if (GetCommandLineFlagInfo("chunkFilePoolDir", &info) && !info.is_default) { + conf->SetStringValue("chunkfilepool.chunk_file_pool_dir", + FLAGS_chunkFilePoolDir); } else { LOG(FATAL) - << "chunkFilePoolDir must be set when run chunkserver in command."; + << "chunkFilePoolDir must be set when run chunkserver in command."; } if 
(GetCommandLineFlagInfo("chunkFilePoolAllocatedPercent", &info)) { @@ -922,38 +909,37 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { if (GetCommandLineFlagInfo("chunkFilePoolMetaPath", &info) && !info.is_default) { - conf->SetStringValue( - "chunkfilepool.meta_path", FLAGS_chunkFilePoolMetaPath); + conf->SetStringValue("chunkfilepool.meta_path", + FLAGS_chunkFilePoolMetaPath); } else { - LOG(FATAL) - << "chunkFilePoolMetaPath must be set when run chunkserver in command."; + LOG(FATAL) << "chunkFilePoolMetaPath must be set when run chunkserver " + "in command."; } - if (GetCommandLineFlagInfo("walFilePoolDir", &info) && - !info.is_default) { - conf->SetStringValue( - "walfilepool.file_pool_dir", FLAGS_walFilePoolDir); + if (GetCommandLineFlagInfo("walFilePoolDir", &info) && !info.is_default) { + conf->SetStringValue("walfilepool.file_pool_dir", FLAGS_walFilePoolDir); } else { LOG(FATAL) - << "walFilePoolDir must be set when run chunkserver in command."; + << "walFilePoolDir must be set when run chunkserver in command."; } if (GetCommandLineFlagInfo("walFilePoolMetaPath", &info) && !info.is_default) { - conf->SetStringValue( - "walfilepool.meta_path", FLAGS_walFilePoolMetaPath); + conf->SetStringValue("walfilepool.meta_path", + FLAGS_walFilePoolMetaPath); } else { - LOG(FATAL) - << "walFilePoolMetaPath must be set when run chunkserver in command."; + LOG(FATAL) << "walFilePoolMetaPath must be set when run chunkserver in " + "command."; } if (GetCommandLineFlagInfo("mdsListenAddr", &info) && !info.is_default) { conf->SetStringValue("mds.listen.addr", FLAGS_mdsListenAddr); } - // 设置日志存放文件夹 + // Set log storage folder if (FLAGS_log_dir.empty()) { - if (!conf->GetStringValue("chunkserver.common.logDir", &FLAGS_log_dir)) { // NOLINT + if (!conf->GetStringValue("chunkserver.common.logDir", + &FLAGS_log_dir)) { // NOLINT LOG(WARNING) << "no chunkserver.common.logDir in " << FLAGS_conf << ", will log to /tmp"; } @@ -962,42 +948,40 @@ void ChunkServer::LoadConfigFromCmdline(common::Configuration *conf) { if (GetCommandLineFlagInfo("enableChunkfilepool", &info) && !info.is_default) { conf->SetBoolValue("chunkfilepool.enable_get_chunk_from_pool", - FLAGS_enableChunkfilepool); + FLAGS_enableChunkfilepool); } if (GetCommandLineFlagInfo("enableWalfilepool", &info) && !info.is_default) { conf->SetBoolValue("walfilepool.enable_get_segment_from_pool", - FLAGS_enableWalfilepool); + FLAGS_enableWalfilepool); } if (GetCommandLineFlagInfo("copysetLoadConcurrency", &info) && !info.is_default) { conf->SetIntValue("copyset.load_concurrency", - FLAGS_copysetLoadConcurrency); + FLAGS_copysetLoadConcurrency); } } int ChunkServer::GetChunkServerMetaFromLocal( - const std::string &storeUri, - const std::string &metaUri, - const std::shared_ptr &fs, - ChunkServerMetadata *metadata) { + const std::string& storeUri, const std::string& metaUri, + const std::shared_ptr& fs, ChunkServerMetadata* metadata) { std::string proto = UriParser::GetProtocolFromUri(storeUri); if (proto != "local") { LOG(ERROR) << "Datastore protocal " << proto << " is not supported yet"; return -1; } - // 从配置文件中获取chunkserver元数据的文件路径 + // Obtain the file path for chunkserver metadata from the configuration file proto = UriParser::GetProtocolFromUri(metaUri); if (proto != "local") { - LOG(ERROR) << "Chunkserver meta protocal " - << proto << " is not supported yet"; + LOG(ERROR) << "Chunkserver meta protocal " << proto + << " is not supported yet"; return -1; } - // 元数据文件已经存在 + // The metadata file already exists if 
(fs->FileExists(UriParser::GetPathFromUri(metaUri).c_str())) {
-        // 获取文件内容
+        // Read the file content
         if (ReadChunkServerMeta(fs, metaUri, metadata) != 0) {
             LOG(ERROR) << "Fail to read persisted chunkserver meta data";
             return -1;
@@ -1011,8 +995,9 @@ int ChunkServer::GetChunkServerMetaFromLocal(
     return -1;
 }
 
-int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs,
-    const std::string &metaUri, ChunkServerMetadata *metadata) {
+int ChunkServer::ReadChunkServerMeta(const std::shared_ptr& fs,
+                                     const std::string& metaUri,
+                                     ChunkServerMetadata* metadata) {
     int fd;
     std::string metaFile = UriParser::GetPathFromUri(metaUri);
 
@@ -1022,7 +1007,7 @@ int ChunkServer::ReadChunkServerMeta(const std::shared_ptr &fs,
         return -1;
     }
 
-    #define METAFILE_MAX_SIZE 4096
+#define METAFILE_MAX_SIZE 4096
     int size;
     char json[METAFILE_MAX_SIZE] = {0};
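The metadata read here is later decoded and verified against its stored checksum (see DecodeChunkServerMeta/MetadataCrc in chunkserver_helper.cpp below). A compact sketch of that verify-on-load idiom follows; the field set (version, id, token, magic) and the DefaultMagic constant come from the helper code, while Crc32 and the struct layout are illustrative stand-ins:

    // Sketch of checksum verification on load, mirroring MetadataCrc():
    // the CRC covers version, id, token and a magic constant, and decoded
    // metadata is rejected when the stored checksum does not match.
    #include <cstddef>
    #include <cstdint>
    #include <string>

    // Stand-in for curve::common::CRC32 (assumed available).
    uint32_t Crc32(uint32_t crc, const unsigned char* data, size_t len);

    struct Meta {
        uint32_t version;
        uint32_t id;
        std::string token;
        uint32_t checksum;
    };

    const uint64_t kMagic = 0x6225929368674118;  // DefaultMagic in the helper

    uint32_t MetaCrc(const Meta& m) {
        uint32_t crc = 0;
        crc = Crc32(crc, reinterpret_cast<const unsigned char*>(&m.version),
                    sizeof(m.version));
        crc = Crc32(crc, reinterpret_cast<const unsigned char*>(&m.id),
                    sizeof(m.id));
        crc = Crc32(crc, reinterpret_cast<const unsigned char*>(m.token.data()),
                    m.token.size());
        crc = Crc32(crc, reinterpret_cast<const unsigned char*>(&kMagic),
                    sizeof(kMagic));
        return crc;
    }

    bool VerifyMeta(const Meta& m) { return MetaCrc(m) == m.checksum; }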
diff --git a/src/chunkserver/chunkserver.h b/src/chunkserver/chunkserver.h
index b9e9005545..6698281fec 100644
--- a/src/chunkserver/chunkserver.h
+++ b/src/chunkserver/chunkserver.h
@@ -23,18 +23,19 @@
 #ifndef SRC_CHUNKSERVER_CHUNKSERVER_H_
 #define SRC_CHUNKSERVER_CHUNKSERVER_H_
 
-#include
 #include
-#include "src/common/configuration.h"
+#include
+
+#include "src/chunkserver/chunkserver_metrics.h"
+#include "src/chunkserver/clone_manager.h"
+#include "src/chunkserver/concurrent_apply/concurrent_apply.h"
 #include "src/chunkserver/copyset_node_manager.h"
 #include "src/chunkserver/heartbeat.h"
-#include "src/chunkserver/scan_manager.h"
-#include "src/chunkserver/clone_manager.h"
 #include "src/chunkserver/register.h"
-#include "src/chunkserver/trash.h"
-#include "src/chunkserver/chunkserver_metrics.h"
-#include "src/chunkserver/concurrent_apply/concurrent_apply.h"
+#include "src/chunkserver/scan_manager.h"
 #include "src/chunkserver/scan_service.h"
+#include "src/chunkserver/trash.h"
+#include "src/common/configuration.h"
 
 using ::curve::chunkserver::concurrent::ConcurrentApplyOption;
 
@@ -43,81 +44,84 @@ namespace chunkserver {
 class ChunkServer {
 public:
     /**
-     * @brief 初始化Chunkserve各子模块
+     * @brief Initialize chunkserver sub-modules
     *
-     * @param[in] argc 命令行参数总数
-     * @param[in] argv 命令行参数列表
+     * @param[in] argc Total number of command line arguments
+     * @param[in] argv Command line argument list
     *
-     * @return 0表示成功,非0失败
+     * @return 0 on success, non-0 on failure
     */
    int Run(int argc, char** argv);
 
     /**
-     * @brief 停止chunkserver,结束各子模块
+     * @brief Stop chunkserver and shut down each sub-module
     */
     void Stop();
 
 private:
-    void InitChunkFilePoolOptions(common::Configuration *conf,
-        FilePoolOptions *chunkFilePoolOptions);
+    void InitChunkFilePoolOptions(common::Configuration* conf,
+                                  FilePoolOptions* chunkFilePoolOptions);
 
-    void InitWalFilePoolOptions(common::Configuration *conf,
-        FilePoolOptions *walPoolOption);
+    void InitWalFilePoolOptions(common::Configuration* conf,
+                                FilePoolOptions* walPoolOption);
 
-    void InitConcurrentApplyOptions(common::Configuration *conf,
-        ConcurrentApplyOption *concurrentApplyOption);
+    void InitConcurrentApplyOptions(
+        common::Configuration* conf,
+        ConcurrentApplyOption* concurrentApplyOption);
 
-    void InitCopysetNodeOptions(common::Configuration *conf,
-        CopysetNodeOptions *copysetNodeOptions);
+    void InitCopysetNodeOptions(common::Configuration* conf,
+                                CopysetNodeOptions* copysetNodeOptions);
 
-    void InitCopyerOptions(common::Configuration *conf,
-        CopyerOptions *copyerOptions);
+    void InitCopyerOptions(common::Configuration* conf,
+                           CopyerOptions* copyerOptions);
 
-    void InitCloneOptions(common::Configuration *conf,
-        CloneOptions *cloneOptions);
+    void InitCloneOptions(common::Configuration* conf,
+                          CloneOptions* cloneOptions);
 
-    void InitScanOptions(common::Configuration *conf,
-        ScanManagerOptions *scanOptions);
+    void InitScanOptions(common::Configuration* conf,
+                         ScanManagerOptions* scanOptions);
 
-    void InitHeartbeatOptions(common::Configuration *conf,
-        HeartbeatOptions *heartbeatOptions);
+    void InitHeartbeatOptions(common::Configuration* conf,
+                              HeartbeatOptions* heartbeatOptions);
 
-    void InitRegisterOptions(common::Configuration *conf,
-        RegisterOptions *registerOptions);
+    void InitRegisterOptions(common::Configuration* conf,
+                             RegisterOptions* registerOptions);
 
-    void InitTrashOptions(common::Configuration *conf,
-        TrashOptions *trashOptions);
+    void InitTrashOptions(common::Configuration* conf,
+                          TrashOptions* trashOptions);
 
-    void InitMetricOptions(common::Configuration *conf,
-        ChunkServerMetricOptions *metricOptions);
+    void InitMetricOptions(common::Configuration* conf,
+                           ChunkServerMetricOptions* metricOptions);
 
-    void LoadConfigFromCmdline(common::Configuration *conf);
+    void LoadConfigFromCmdline(common::Configuration* conf);
 
-    int GetChunkServerMetaFromLocal(const std::string &storeUri,
-        const std::string &metaUri,
-        const std::shared_ptr &fs,
-        ChunkServerMetadata *metadata);
+    int GetChunkServerMetaFromLocal(const std::string& storeUri,
+                                    const std::string& metaUri,
+                                    const std::shared_ptr& fs,
+                                    ChunkServerMetadata* metadata);
 
-    int ReadChunkServerMeta(const std::shared_ptr &fs,
-        const std::string &metaUri, ChunkServerMetadata *metadata);
+    int ReadChunkServerMeta(const std::shared_ptr& fs,
+                            const std::string& metaUri,
+                            ChunkServerMetadata* metadata);
 
 private:
-    // copysetNodeManager_ 管理chunkserver上所有copysetNode
+    // copysetNodeManager_ manages all copyset nodes on this chunkserver
     CopysetNodeManager* copysetNodeManager_;
 
-    // cloneManager_ 管理克隆任务
+    // cloneManager_ manages clone tasks
     CloneManager cloneManager_;
 
     // scan copyset manager
     ScanManager scanManager_;
 
-    // heartbeat_ 负责向mds定期发送心跳,并下发心跳中任务
+    // heartbeat_ periodically sends heartbeats to MDS and dispatches the
+    // tasks carried in the heartbeat
     Heartbeat heartbeat_;
 
-    // trash_ 定期回收垃圾站中的物理空间
+    // trash_ periodically reclaims physical space in the recycle bin
     std::shared_ptr trash_;
 
-    // install snapshot流控
+    // install snapshot flow control
     scoped_refptr snapshotThrottle_;
 };
@@ -125,4 +129,3 @@ class ChunkServer {
 }  // namespace curve
 
 #endif  // SRC_CHUNKSERVER_CHUNKSERVER_H_
-
diff --git a/src/chunkserver/chunkserver_helper.cpp b/src/chunkserver/chunkserver_helper.cpp
index cf12df7f67..96afcf39e8 100644
--- a/src/chunkserver/chunkserver_helper.cpp
+++ b/src/chunkserver/chunkserver_helper.cpp
@@ -20,19 +20,20 @@
 * Author: lixiaocui
 */
 
-#include
-#include
+#include "src/chunkserver/chunkserver_helper.h"
+
 #include
+#include
+#include
 
 #include "src/common/crc32.h"
-#include "src/chunkserver/chunkserver_helper.h"
 
 namespace curve {
 namespace chunkserver {
 const uint64_t DefaultMagic = 0x6225929368674118;
 
 bool ChunkServerMetaHelper::EncodeChunkServerMeta(
-    const ChunkServerMetadata &meta, std::string *out) {
+    const ChunkServerMetadata& meta, std::string* out) {
     if (!out->empty()) {
         LOG(ERROR) << "out string must empty!";
         return false;
@@ -50,8 +51,8 @@ bool ChunkServerMetaHelper::EncodeChunkServerMeta(
     return true;
 }
 
-bool ChunkServerMetaHelper::DecodeChunkServerMeta(
-    const std::string &meta, ChunkServerMetadata *out) {
+bool ChunkServerMetaHelper::DecodeChunkServerMeta(const std::string& meta,
+                                                  ChunkServerMetadata* out) {
     std::string 
jsonStr(meta); std::string err; json2pb::Json2PbOptions opt; @@ -63,7 +64,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return false; } - // 验证meta是否正确 + // Verify if the meta is correct uint32_t crc = MetadataCrc(*out); if (crc != out->checksum()) { LOG(ERROR) << "ChunkServer persisted metadata CRC dismatch." @@ -75,8 +76,7 @@ bool ChunkServerMetaHelper::DecodeChunkServerMeta( return true; } -uint32_t ChunkServerMetaHelper::MetadataCrc( - const ChunkServerMetadata &meta) { +uint32_t ChunkServerMetaHelper::MetadataCrc(const ChunkServerMetadata& meta) { uint32_t crc = 0; uint32_t ver = meta.version(); uint32_t id = meta.id(); @@ -87,7 +87,7 @@ uint32_t ChunkServerMetaHelper::MetadataCrc( crc = curve::common::CRC32(crc, reinterpret_cast(&id), sizeof(id)); crc = curve::common::CRC32(crc, token, meta.token().size()); crc = curve::common::CRC32(crc, reinterpret_cast(&magic), - sizeof(magic)); + sizeof(magic)); return crc; } diff --git a/src/chunkserver/chunkserver_main.cpp b/src/chunkserver/chunkserver_main.cpp index 5bc4cb8736..4c13bf64d8 100644 --- a/src/chunkserver/chunkserver_main.cpp +++ b/src/chunkserver/chunkserver_main.cpp @@ -26,7 +26,7 @@ int main(int argc, char* argv[]) { butil::AtExitManager atExitManager; ::curve::chunkserver::ChunkServer chunkserver; LOG(INFO) << "ChunkServer starting."; - // 这里不能用fork创建守护进程,bvar会存在一些问题 + // You cannot use fork to create daemons here, as bvar may have some issues // https://github.com/apache/incubator-brpc/issues/697 // https://github.com/apache/incubator-brpc/issues/208 chunkserver.Run(argc, argv); diff --git a/src/chunkserver/chunkserver_metrics.cpp b/src/chunkserver/chunkserver_metrics.cpp index 339ecbbe66..f8a361d94e 100644 --- a/src/chunkserver/chunkserver_metrics.cpp +++ b/src/chunkserver/chunkserver_metrics.cpp @@ -21,8 +21,9 @@ */ #include "src/chunkserver/chunkserver_metrics.h" -#include + #include +#include #include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/passive_getfn.h" @@ -31,13 +32,15 @@ namespace curve { namespace chunkserver { IOMetric::IOMetric() - : rps_(&reqNum_, 1), iops_(&ioNum_, 1), eps_(&errorNum_, 1), + : rps_(&reqNum_, 1), + iops_(&ioNum_, 1), + eps_(&errorNum_, 1), bps_(&ioBytes_, 1) {} IOMetric::~IOMetric() {} -int IOMetric::Init(const std::string &prefix) { - // 暴露所有的metric +int IOMetric::Init(const std::string& prefix) { + // Expose all metrics if (reqNum_.expose_as(prefix, "request_num") != 0) { LOG(ERROR) << "expose request num failed."; return -1; @@ -94,9 +97,8 @@ void IOMetric::OnResponse(size_t size, int64_t latUs, bool hasError) { } } - -int CSIOMetric::Init(const std::string &prefix) { - // 初始化io统计项metric +int CSIOMetric::Init(const std::string& prefix) { + // Initialize IO statistics item metric std::string readPrefix = prefix + "_read"; std::string writePrefix = prefix + "_write"; std::string recoverPrefix = prefix + "_recover"; @@ -161,30 +163,30 @@ void CSIOMetric::OnResponse(CSIOMetricType type, size_t size, int64_t latUs, IOMetricPtr CSIOMetric::GetIOMetric(CSIOMetricType type) { IOMetricPtr result = nullptr; switch (type) { - case CSIOMetricType::READ_CHUNK: - result = readMetric_; - break; - case CSIOMetricType::WRITE_CHUNK: - result = writeMetric_; - break; - case CSIOMetricType::RECOVER_CHUNK: - result = recoverMetric_; - break; - case CSIOMetricType::PASTE_CHUNK: - result = pasteMetric_; - break; - case CSIOMetricType::DOWNLOAD: - result = downloadMetric_; - break; - default: - result = nullptr; - break; + case CSIOMetricType::READ_CHUNK: + result = readMetric_; + 
break; + case CSIOMetricType::WRITE_CHUNK: + result = writeMetric_; + break; + case CSIOMetricType::RECOVER_CHUNK: + result = recoverMetric_; + break; + case CSIOMetricType::PASTE_CHUNK: + result = pasteMetric_; + break; + case CSIOMetricType::DOWNLOAD: + result = downloadMetric_; + break; + default: + result = nullptr; + break; } return result; } -int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { +int CSCopysetMetric::Init(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { logicPoolId_ = logicPoolId; copysetId_ = copysetId; int ret = ioMetrics_.Init(Prefix()); @@ -196,7 +198,7 @@ int CSCopysetMetric::Init(const LogicPoolID &logicPoolId, return 0; } -void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { +void CSCopysetMetric::MonitorDataStore(CSDataStore* datastore) { std::string chunkCountPrefix = Prefix() + "_chunk_count"; std::string snapshotCountPrefix = Prefix() + "snapshot_count"; std::string cloneChunkCountPrefix = Prefix() + "_clonechunk_count"; @@ -209,30 +211,36 @@ void CSCopysetMetric::MonitorDataStore(CSDataStore *datastore) { } void CSCopysetMetric::MonitorCurveSegmentLogStorage( - CurveSegmentLogStorage *logStorage) { + CurveSegmentLogStorage* logStorage) { std::string walSegmentCountPrefix = Prefix() + "_walsegment_count"; walSegmentCount_ = std::make_shared>( walSegmentCountPrefix, GetLogStorageWalSegmentCountFunc, logStorage); } ChunkServerMetric::ChunkServerMetric() - : hasInited_(false), leaderCount_(nullptr), chunkLeft_(nullptr), - walSegmentLeft_(nullptr), chunkTrashed_(nullptr), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : hasInited_(false), + leaderCount_(nullptr), + chunkLeft_(nullptr), + walSegmentLeft_(nullptr), + chunkTrashed_(nullptr), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} -ChunkServerMetric *ChunkServerMetric::self_ = nullptr; +ChunkServerMetric* ChunkServerMetric::self_ = nullptr; -ChunkServerMetric *ChunkServerMetric::GetInstance() { - // chunkserver metric 在chunkserver启动时初始化创建 - // 因此创建的时候不会存在竞争,不需要锁保护 +ChunkServerMetric* ChunkServerMetric::GetInstance() { + // The chunkserver metric is created and initialized during chunkserver + // startup, so there is no contention at creation time and no lock + // protection is required if (self_ == nullptr) { self_ = new ChunkServerMetric; } return self_; } -int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { +int ChunkServerMetric::Init(const ChunkServerMetricOptions& option) { if (hasInited_) { LOG(WARNING) << "chunkserver metric has inited."; return 0; @@ -245,14 +253,14 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { return 0; } - // 初始化io统计项metric + // Initialize the IO statistics metrics int ret = ioMetrics_.Init(Prefix()); if (ret < 0) { LOG(ERROR) << "Init chunkserver metric failed."; return -1; } - // 初始化资源统计 + // Initialize resource statistics std::string leaderCountPrefix = Prefix() + "_leader_count"; leaderCount_ = std::make_shared>(leaderCountPrefix); @@ -278,7 +286,7 @@ int ChunkServerMetric::Init(const ChunkServerMetricOptions &option) { } int ChunkServerMetric::Fini() { - // 释放资源,从而将暴露的metric从全局的map中移除 + // Release resources to remove exposed metrics from the global map ioMetrics_.Fini(); leaderCount_ = nullptr; chunkLeft_ = nullptr; @@ -293,8 +301,8 @@ int ChunkServerMetric::Fini() { return 0; } -int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId) { 
+int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { if (!option_.collectMetric) { return 0; } @@ -321,9 +329,8 @@ int ChunkServerMetric::CreateCopysetMetric(const LogicPoolID &logicPoolId, return 0; } -CopysetMetricPtr -ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +CopysetMetricPtr ChunkServerMetric::GetCopysetMetric( + const LogicPoolID& logicPoolId, const CopysetID& copysetId) { if (!option_.collectMetric) { return nullptr; } @@ -332,18 +339,18 @@ ChunkServerMetric::GetCopysetMetric(const LogicPoolID &logicPoolId, return copysetMetricMap_.Get(groupId); } -int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID ©setId) { +int ChunkServerMetric::RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId) { GroupId groupId = ToGroupId(logicPoolId, copysetId); - // 这里先保存copyset metric,等remove后再去释放 - // 防止在读写锁里面去操作metric,导致死锁 + // Save the copyset metric here first, and then release it after removing it + // Prevent operating metrics within read write locks, resulting in deadlocks auto metric = copysetMetricMap_.Get(groupId); copysetMetricMap_.Remove(groupId); return 0; } -void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void ChunkServerMetric::OnRequest(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type) { if (!option_.collectMetric) { return; @@ -356,8 +363,8 @@ void ChunkServerMetric::OnRequest(const LogicPoolID &logicPoolId, ioMetrics_.OnRequest(type); } -void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, - const CopysetID ©setId, +void ChunkServerMetric::OnResponse(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { if (!option_.collectMetric) { @@ -371,7 +378,7 @@ void ChunkServerMetric::OnResponse(const LogicPoolID &logicPoolId, ioMetrics_.OnResponse(type, size, latUs, hasError); } -void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { +void ChunkServerMetric::MonitorChunkFilePool(FilePool* chunkFilePool) { if (!option_.collectMetric) { return; } @@ -381,7 +388,7 @@ void ChunkServerMetric::MonitorChunkFilePool(FilePool *chunkFilePool) { chunkLeftPrefix, GetChunkLeftFunc, chunkFilePool); } -void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { +void ChunkServerMetric::MonitorWalFilePool(FilePool* walFilePool) { if (!option_.collectMetric) { return; } @@ -391,7 +398,7 @@ void ChunkServerMetric::MonitorWalFilePool(FilePool *walFilePool) { walSegmentLeftPrefix, GetWalSegmentLeftFunc, walFilePool); } -void ChunkServerMetric::MonitorTrash(Trash *trash) { +void ChunkServerMetric::MonitorTrash(Trash* trash) { if (!option_.collectMetric) { return; } @@ -417,7 +424,7 @@ void ChunkServerMetric::DecreaseLeaderCount() { *leaderCount_ << -1; } -void ChunkServerMetric::ExposeConfigMetric(common::Configuration *conf) { +void ChunkServerMetric::ExposeConfigMetric(common::Configuration* conf) { if (!option_.collectMetric) { return; } diff --git a/src/chunkserver/chunkserver_metrics.h b/src/chunkserver/chunkserver_metrics.h index d4354d196f..c2fdb91823 100644 --- a/src/chunkserver/chunkserver_metrics.h +++ b/src/chunkserver/chunkserver_metrics.h @@ -23,18 +23,19 @@ #ifndef SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ #define SRC_CHUNKSERVER_CHUNKSERVER_METRICS_H_ -#include #include +#include + +#include #include #include -#include #include #include 
"include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" +#include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/rw_lock.h" #include "src/common/configuration.h" -#include "src/chunkserver/datastore/file_pool.h" +#include "src/common/uncopyable.h" using curve::common::Configuration; using curve::common::ReadLockGuard; @@ -54,57 +55,59 @@ class Trash; template using PassiveStatusPtr = std::shared_ptr>; -template using AdderPtr = std::shared_ptr>; +template +using AdderPtr = std::shared_ptr>; -// 使用LatencyRecorder的实现来统计读写请求的size情况 -// 可以统计分位值、最大值、中位数、平均值等情况 +// Using the implementation of LatencyRecorder to count the size of read and +// write requests Statistics can be conducted on quantile values, maximum +// values, median values, mean values, and other factors using IOSizeRecorder = bvar::LatencyRecorder; -// io 相关的统计项 +// IO related statistical items class IOMetric { public: IOMetric(); virtual ~IOMetric(); /** - * 初始化 io metric - * 主要用于曝光各metric指标 - * @param prefix: 用于bvar曝光时使用的前缀 - * @return 成功返回0,失败返回-1 + * Initialize io metric + * Mainly used for exposing various metric indicators + * @param prefix: The prefix used for bvar exposure + * @return returns 0 for success, -1 for failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * IO请求到来时统计requestNum + * Count requestNum when IO requests arrive */ void OnRequest(); /** - * IO 完成以后,记录该次IO的指标 - * 错误的io不会计入iops和bps统计 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * After IO is completed, record the indicators for this IO + * Incorrect IO will not be included in iops and bps statistics + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(size_t size, int64_t latUs, bool hasError); public: - // io请求的数量 + // Number of IO requests bvar::Adder reqNum_; - // 成功io的数量 + // Number of successful IO bvar::Adder ioNum_; - // 失败的io个数 + // Number of failed IO bvar::Adder errorNum_; - // 所有io的数据量 + // The data volume of all IO bvar::Adder ioBytes_; - // io的延时情况(分位值、最大值、中位数、平均值) + // Delay situation of IO (quantile, maximum, median, average) bvar::LatencyRecorder latencyRecorder_; - // io大小的情况(分位值、最大值、中位数、平均值) + // The size of IO (quantile, maximum, median, average) IOSizeRecorder sizeRecorder_; - // 最近1秒请求的IO数量 + // Number of IO requests in the last 1 second bvar::PerSecond> rps_; - // 最近1秒的iops + // iops in the last 1 second bvar::PerSecond> iops_; - // 最近1秒的出错IO数量 + // Number of IO errors in the last 1 second bvar::PerSecond> eps_; - // 最近1秒的数据量 + // Data volume in the last 1 second bvar::PerSecond> bps_; }; using IOMetricPtr = std::shared_ptr; @@ -120,100 +123,109 @@ enum class CSIOMetricType { class CSIOMetric { public: CSIOMetric() - : readMetric_(nullptr), writeMetric_(nullptr), recoverMetric_(nullptr), - pasteMetric_(nullptr), downloadMetric_(nullptr) {} + : readMetric_(nullptr), + writeMetric_(nullptr), + recoverMetric_(nullptr), + pasteMetric_(nullptr), + downloadMetric_(nullptr) {} ~CSIOMetric() {} /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type); /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the 
request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type); /** - * 初始化各项op的metric统计项 - * @return 成功返回0,失败返回-1 + * Initialize metric statistics for each op + * @return returns 0 for success, -1 for failure */ - int Init(const std::string &prefix); + int Init(const std::string& prefix); /** - * 释放各项op的metric资源 + * Release metric resources for various OPs */ void Fini(); protected: - // ReadChunk统计 + // ReadChunk statistics IOMetricPtr readMetric_; - // WriteChunk统计 + // WriteChunk statistics IOMetricPtr writeMetric_; - // RecoverChunk统计 + // RecoverChunk statistics IOMetricPtr recoverMetric_; - // PasteChunk信息 + // PasteChunk Information IOMetricPtr pasteMetric_; - // Download统计 + // Download statistics IOMetricPtr downloadMetric_; }; class CSCopysetMetric { public: CSCopysetMetric() - : logicPoolId_(0), copysetId_(0), chunkCount_(nullptr), - walSegmentCount_(nullptr), snapshotCount_(nullptr), + : logicPoolId_(0), + copysetId_(0), + chunkCount_(nullptr), + walSegmentCount_(nullptr), + snapshotCount_(nullptr), cloneChunkCount_(nullptr) {} ~CSCopysetMetric() {} /** - * 初始化copyset级别的metric统计项 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Initialize metric statistics at the copyset level + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return returns 0 for success, -1 for failure */ - int Init(const LogicPoolID &logicPoolId, const CopysetID ©setId); + int Init(const LogicPoolID& logicPoolId, const CopysetID& copysetId); /** - * 监控DataStore指标,主要包括chunk的数量、快照的数量等 - * @param datastore: 该copyset下的datastore指针 + * Monitor DataStore indicators, mainly including the number of chunks, + * number of snapshots, etc + * @param datastore: The datastore pointer under this copyset */ - void MonitorDataStore(CSDataStore *datastore); + void MonitorDataStore(CSDataStore* datastore); /** * @brief: Monitor log storage's metric, like the number of WAL segment file * @param logStorage: The pointer to CurveSegmentLogStorage */ - void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage *logStorage); + void MonitorCurveSegmentLogStorage(CurveSegmentLogStorage* logStorage); /** - * 执行请求前记录metric - * @param type: 请求对应的metric类型 + * Record metric before executing the request + * @param type: The corresponding metric type of the request */ void OnRequest(CSIOMetricType type) { ioMetrics_.OnRequest(type); } /** - * 执行请求后记录metric - * 错误的io不会计入iops和bps统计 - * @param type: 请求对应的metric类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record metric after executing the request + * Incorrect IO will not be included in iops and bps statistics + * @param type: The corresponding metric type of the request + * @param size: The size of the IO data for this time + * @param latUS: The 
delay of this IO + * @param hasError: Did any errors occur during this IO */ void OnResponse(CSIOMetricType type, size_t size, int64_t latUs, bool hasError) { @@ -221,9 +233,10 @@ class CSCopysetMetric { } /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return The IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); @@ -264,27 +277,27 @@ class CSCopysetMetric { } private: - // 逻辑池id + // Logical Pool ID LogicPoolID logicPoolId_; // copyset id CopysetID copysetId_; - // copyset上的 chunk 的数量 + // Number of chunks on copyset PassiveStatusPtr chunkCount_; // The total number of WAL segment in copyset PassiveStatusPtr walSegmentCount_; - // copyset上的 快照文件 的数量 + // Number of snapshot files on copyset PassiveStatusPtr snapshotCount_; - // copyset上的 clone chunk 的数量 + // The number of clone chunks on the copyset PassiveStatusPtr cloneChunkCount_; - // copyset上的IO类型的metric统计 + // Metric statistics for IO types on the copyset CSIOMetric ioMetrics_; }; struct ChunkServerMetricOptions { bool collectMetric; - // chunkserver的ip + // Chunkserver IP std::string ip; - // chunkserver的端口号 + // Chunkserver port number uint32_t port; ChunkServerMetricOptions() : collectMetric(false), ip("127.0.0.1"), port(8888) {} @@ -344,173 +357,175 @@ class CopysetMetricMap { } private: - // 保护复制组metric map的读写锁 + // Read-write lock protecting the copyset metric map RWLock rwLock_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each copyset metric, using GroupId as the key std::unordered_map map_; }; class ChunkServerMetric : public Uncopyable { public: - // 实现单例 - static ChunkServerMetric *GetInstance(); + // Singleton implementation + static ChunkServerMetric* GetInstance(); /** - * 初始化chunkserver统计项 - * @pa)ram option: 初始化配置项 - * @return 成功返回0,失败返回-1 + * Initialize chunkserver statistics + * @param option: Initialization options + * @return Returns 0 on success, -1 on failure */ - int Init(const ChunkServerMetricOptions &option); + int Init(const ChunkServerMetricOptions& option); /** - * 释放metric资源 - * @return 成功返回0,失败返回-1 + * Release metric resources + * @return Returns 0 on success, -1 on failure */ int Fini(); /** - * 请求前记录metric - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 + * Record the metric before a request + * @param logicPoolId: The logical pool ID where this IO operation is + * located + * @param copysetId: The copyset ID where this IO operation is located + * @param type: Request type */ - void OnRequest(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnRequest(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type); /** - * 请求结束时记录该次IO指标 - * 错误的io不会计入iops和bps统计 - * @param logicPoolId: 此次io操作所在的逻辑池id - * @param copysetId: 此次io操作所在的copysetid - * @param type: 请求类型 - * @param size: 此次io数据的大小 - * @param latUS: 此次io的延时 - * @param hasError: 此次io是否有错误产生 + * Record the IO metric at the end of the request. + * Failed IOs are not counted in the iops and bps statistics. + * @param logicPoolId: The logical pool ID where this IO operation is + * located + * @param copysetId: The copyset ID where this IO operation is located + * @param type: Request type + * @param size: The data size of this IO + * @param latUs: The latency of this IO + * @param hasError: Whether any error occurred during this IO */ - void OnResponse(const LogicPoolID &logicPoolId, const CopysetID &copysetId, + void OnResponse(const LogicPoolID& logicPoolId, const CopysetID& copysetId, CSIOMetricType type, size_t size, int64_t latUs, bool hasError); /** - * 创建指定copyset的metric - * 如果collectMetric为false,返回0,但实际并不会创建 - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1,如果指定metric已存在返回失败 + * Create a metric for the specified copyset + * If collectMetric is false, it returns 0 but does not actually create + * the metric + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return Returns 0 on success, -1 on failure; fails if the + * specified metric already exists */ - int CreateCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int CreateCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 获取指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回指定的copyset metric,失败返回nullptr + * Obtain the metric of the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return Returns the specified copyset metric on success, nullptr on + * failure */ - CopysetMetricPtr GetCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + CopysetMetricPtr GetCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 删除指定copyset的metric - * @param logicPoolId: copyset所属逻辑池的id - * @param copysetId: copyset的id - * @return 成功返回0,失败返回-1 + * Delete the metric for the specified copyset + * @param logicPoolId: The ID of the logical pool to which the copyset + * belongs + * @param copysetId: The ID of the copyset + * @return Returns 0 on success, -1 on failure */ - int RemoveCopysetMetric(const LogicPoolID &logicPoolId, - const CopysetID &copysetId); + int RemoveCopysetMetric(const LogicPoolID& logicPoolId, + const CopysetID& copysetId); /** - * 监视chunk分配池,主要监视池中chunk的数量 - * @param chunkFilePool: chunkfilePool的对象指针 + * Monitor the chunk allocation pool, mainly monitoring the number of chunks + * in the pool + * @param chunkFilePool: Object pointer to chunkfilePool */ - void MonitorChunkFilePool(FilePool *chunkFilePool); + void MonitorChunkFilePool(FilePool* chunkFilePool); /** - * 监视wal segment分配池,主要监视池中segment的数量 - * @param walFilePool: walfilePool的对象指针 + * Monitor the wal segment allocation pool, mainly monitoring the + * number of segments in the pool + * @param walFilePool: Object pointer to walfilePool */ - void MonitorWalFilePool(FilePool *walFilePool); + void MonitorWalFilePool(FilePool* walFilePool); /** - * 监视回收站 - * @param trash: trash的对象指针 + * Monitor the trash + * @param trash: Object pointer to trash */ - void MonitorTrash(Trash *trash); + void MonitorTrash(Trash* trash); /** - * 增加 leader count 计数 + * Increase the leader count */ void IncreaseLeaderCount(); /** - * 减少 leader count 计数 + * Decrease the leader count */ void DecreaseLeaderCount(); /** - * 更新配置项数据 - * @param conf: 配置内容 + * Update configuration data + * @param conf: Configuration content */ - void ExposeConfigMetric(common::Configuration *conf); + void ExposeConfigMetric(common::Configuration* conf); /** - * 获取指定类型的IOMetric - * @param type: 请求对应的metric类型 - * @return 
返回指定类型对应的IOMetric指针,如果类型不存在则返回nullptr + * Obtain IOMetric of the specified type + * @param type: The corresponding metric type of the request + * @return returns the IOMetric pointer corresponding to the specified type, + * or nullptr if the type does not exist */ IOMetricPtr GetIOMetric(CSIOMetricType type) { return ioMetrics_.GetIOMetric(type); } - CopysetMetricMap *GetCopysetMetricMap() { return ©setMetricMap_; } + CopysetMetricMap* GetCopysetMetricMap() { return ©setMetricMap_; } uint32_t GetCopysetCount() { return copysetMetricMap_.Size(); } uint32_t GetLeaderCount() const { - if (leaderCount_ == nullptr) - return 0; + if (leaderCount_ == nullptr) return 0; return leaderCount_->get_value(); } uint32_t GetTotalChunkCount() { - if (chunkCount_ == nullptr) - return 0; + if (chunkCount_ == nullptr) return 0; return chunkCount_->get_value(); } uint32_t GetTotalSnapshotCount() { - if (snapshotCount_ == nullptr) - return 0; + if (snapshotCount_ == nullptr) return 0; return snapshotCount_->get_value(); } uint32_t GetTotalCloneChunkCount() { - if (cloneChunkCount_ == nullptr) - return 0; + if (cloneChunkCount_ == nullptr) return 0; return cloneChunkCount_->get_value(); } uint32_t GetTotalWalSegmentCount() { - if (nullptr == walSegmentCount_) - return 0; + if (nullptr == walSegmentCount_) return 0; return walSegmentCount_->get_value(); } uint32_t GetChunkLeftCount() const { - if (chunkLeft_ == nullptr) - return 0; + if (chunkLeft_ == nullptr) return 0; return chunkLeft_->get_value(); } uint32_t GetWalSegmentLeftCount() const { - if (nullptr == walSegmentLeft_) - return 0; + if (nullptr == walSegmentLeft_) return 0; return walSegmentLeft_->get_value(); } uint32_t GetChunkTrashedCount() const { - if (chunkTrashed_ == nullptr) - return 0; + if (chunkTrashed_ == nullptr) return 0; return chunkTrashed_->get_value(); } @@ -522,32 +537,32 @@ class ChunkServerMetric : public Uncopyable { } private: - // 初始化标志 + // Initialization flag bool hasInited_; - // 配置项 + // Configuration Item ChunkServerMetricOptions option_; - // leader 的数量 + // Number of leaders AdderPtr leaderCount_; - // chunkfilepool 中剩余的 chunk 的数量 + // The number of remaining chunks in the chunkfilepool PassiveStatusPtr chunkLeft_; - // walfilepool 中剩余的 wal segment 的数量 + // The number of remaining wal segments in the walfilepool PassiveStatusPtr walSegmentLeft_; - // trash 中的 chunk 的数量 + // Number of chunks in trash PassiveStatusPtr chunkTrashed_; - // chunkserver上的 chunk 的数量 + // Number of chunks on chunkserver PassiveStatusPtr chunkCount_; // The total number of WAL segment in chunkserver PassiveStatusPtr walSegmentCount_; - // chunkserver上的 快照文件 的数量 + // Number of snapshot files on chunkserver PassiveStatusPtr snapshotCount_; - // chunkserver上的 clone chunk 的数量 + // Number of clone chunks on chunkserver PassiveStatusPtr cloneChunkCount_; - // 各复制组metric的映射表,用GroupId作为key + // Mapping table for each replication group metric, using GroupId as the key CopysetMetricMap copysetMetricMap_; - // chunkserver上的IO类型的metric统计 + // Metric statistics of IO types on chunkserver CSIOMetric ioMetrics_; - // 用于单例模式的自指指针 - static ChunkServerMetric *self_; + // Self pointing pointer for singleton mode + static ChunkServerMetric* self_; }; } // namespace chunkserver diff --git a/src/chunkserver/cli2.h b/src/chunkserver/cli2.h index ba60e057e7..512850b747 100644 --- a/src/chunkserver/cli2.h +++ b/src/chunkserver/cli2.h @@ -33,57 +33,50 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli is the encapsulation of 
configuration change related interfaces, which is + * convenient to use and avoids direct RPC operations */ -// 获取leader -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - Peer *leader); - -// 增加一个peer -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 变更配置 -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options); - -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options); - -// 重置复制组 -butil::Status ResetPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration& newPeers, - const Peer& requestPeer, +// Get the leader +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader); + +// Add a peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options); + +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options); + +// Change configuration +butil::Status ChangePeers(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Configuration& newPeers, + const braft::cli::CliOptions& options); + +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const Peer& peer, + const braft::cli::CliOptions& options); + +// Reset replication group +butil::Status ResetPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Peer& peer, +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Peer& peer, const braft::cli::CliOptions& options); -// 给chunkserver上全部copyset副本触发快照 +// Trigger a snapshot for all copyset replicas on the chunkserver butil::Status SnapshotAll(const Peer& peer, const braft::cli::CliOptions& options); diff --git a/src/client/chunk_closure.cpp b/src/client/chunk_closure.cpp index 592e9d2a06..00af4a73d5 100644 --- a/src/client/chunk_closure.cpp +++ b/src/client/chunk_closure.cpp @@ -22,48 +22,58 @@ #include "src/client/chunk_closure.h" -#include -#include #include +#include +#include #include "src/client/client_common.h" #include "src/client/copyset_client.h" +#include "src/client/io_tracker.h" #include "src/client/metacache.h" #include "src/client/request_closure.h" #include "src/client/request_context.h" #include "src/client/service_helper.h" -#include "src/client/io_tracker.h" -// TODO(tongguangxun) :优化重试逻辑,将重试逻辑与RPC返回逻辑拆开 +// TODO(tongguangxun): Optimize retry logic by separating the retry logic from +// the RPC return logic 
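As an editorial aside before the implementation: the retry policy that PreProcessBeforeRetry(), OverLoadBackOff() and TimeoutBackOff() below implement boils down to exponential backoff clamped to configured bounds, with random jitter added in the overload case. The following self-contained C++ sketch only illustrates that pattern; the constants and the two function names are illustrative assumptions, not the actual values carried in FailureRequestOption.

#include <algorithm>
#include <cstdint>
#include <random>

// Illustrative bounds; the real ones come from configuration (e.g.
// failReqOpt_.chunkserverRPCTimeoutMS and
// failReqOpt_.chunkserverMaxRetrySleepIntervalUS in the code below).
constexpr uint64_t kBaseTimeoutMs = 1000;             // assumed base RPC timeout
constexpr uint64_t kMaxTimeoutMs = 8000;              // assumed timeout ceiling
constexpr uint64_t kBaseSleepUs = 100ULL * 1000;      // assumed base retry sleep
constexpr uint64_t kMaxSleepUs = 8ULL * 1000 * 1000;  // assumed sleep ceiling

// Timeout backoff: double the RPC timeout on each retry, clamped above.
uint64_t TimeoutBackoff(uint64_t retriedTimes) {
    uint64_t shift = std::min<uint64_t>(retriedTimes, 3);
    return std::min(kBaseTimeoutMs << shift, kMaxTimeoutMs);
}

// Overload backoff: exponential sleep with +/-10% jitter so that many
// clients retrying at once do not hit the overloaded chunkserver in
// lockstep; the result is clamped to the configured interval bounds.
uint64_t OverloadBackoff(uint64_t retriedTimes) {
    uint64_t sleepUs = kBaseSleepUs << std::min<uint64_t>(retriedTimes, 6);
    static thread_local std::mt19937_64 rng{std::random_device{}()};
    std::uniform_int_distribution<int64_t> jitter(
        -static_cast<int64_t>(sleepUs / 10),
        static_cast<int64_t>(sleepUs / 10));
    int64_t withJitter = static_cast<int64_t>(sleepUs) + jitter(rng);
    return std::clamp<uint64_t>(static_cast<uint64_t>(withJitter),
                                kBaseSleepUs, kMaxSleepUs);
}

In the real code below, the timeout value feeds reqDone->SetNextTimeOutMS() before the retry is issued, and the overload sleep is handed to bthread_usleep().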
namespace curve { namespace client { -ClientClosure::BackoffParam ClientClosure::backoffParam_; -FailureRequestOption ClientClosure::failReqOpt_; +ClientClosure::BackoffParam ClientClosure::backoffParam_; +FailureRequestOption ClientClosure::failReqOpt_; void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { RequestClosure* reqDone = static_cast(done_); - // 如果对应的cooysetId leader可能发生变更 - // 那么设置这次重试请求超时时间为默认值 - // 这是为了尽快重试这次请求 - // 从copysetleader迁移到client GetLeader获取到新的leader会有1~2s的延迟 - // 对于一个请求来说,GetLeader仍然可能返回旧的Leader - // rpc timeout时间可能会被设置成2s/4s,等到超时后再去获取leader信息 - // 为了尽快在新的Leader上重试请求,将rpc timeout时间设置为默认值 + // If the leader of the corresponding copysetId may change, + // set the retry request timeout to the default value. + // This is done to retry this request as soon as possible. + // When migrating from the copyset leader to obtaining a new leader + // through client GetLeader, there may be a delay of 1~2 seconds. + // For a given request, GetLeader may still return the old Leader. + // The RPC timeout may be set to 2s/4s, and the leader information is + // only refreshed after that timeout expires. + // To promptly retry the request on the new Leader, set the RPC timeout + // to the default value. if (cntlstatus == brpc::ERPCTIMEDOUT || cntlstatus == ETIMEDOUT) { uint64_t nextTimeout = 0; uint64_t retriedTimes = reqDone->GetRetriedTimes(); bool leaderMayChange = metaCache_->IsLeaderMayChange( chunkIdInfo_.lpid_, chunkIdInfo_.cpid_); - // 当某一个IO重试超过一定次数后,超时时间一定进行指数退避 - // 当底层chunkserver压力大时,可能也会触发unstable - // 由于copyset leader may change,会导致请求超时时间设置为默认值 - // 而chunkserver在这个时间内处理不了,导致IO hang - // 真正宕机的情况下,请求重试一定次数后会处理完成 - // 如果一直重试,则不是宕机情况,这时候超时时间还是要进入指数退避逻辑 - if (retriedTimes < failReqOpt_.chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT + // Once an IO has been retried more than a certain number of times, the + // timeout must enter exponential backoff. When the underlying + // chunkserver is under heavy pressure, unstable may also be triggered. + // Because the copyset leader may change, the request timeout is set to + // the default value, but the chunkserver cannot finish within that + // time, which leaves the IO hanging. If the server is really down, the + // request completes after a certain number of retries; if the retries + // never stop, it is not a crash, and the timeout must still enter the + // exponential backoff logic. + if (retriedTimes < + failReqOpt_ + .chunkserverMinRetryTimesForceTimeoutBackoff && // NOLINT leaderMayChange) { nextTimeout = failReqOpt_.chunkserverRPCTimeoutMS; } else { @@ -71,25 +81,23 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { reqDone->SetNextTimeOutMS(nextTimeout); - LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "rpc timeout, next timeout = " << nextTimeout << ", " + << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); return; } if (rpcstatus == CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD) { uint64_t nextsleeptime = 
OverLoadBackOff(reqDone->GetRetriedTimes()); LOG(WARNING) << "chunkserver overload, sleep(us) = " << nextsleeptime - << ", " << *reqCtx_ - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << ", " << *reqCtx_ + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); bthread_usleep(nextsleeptime); return; } @@ -103,19 +111,19 @@ void ClientClosure::PreProcessBeforeRetry(int rpcstatus, int cntlstatus) { } } - LOG(WARNING) - << "Rpc failed " - << (retryDirectly_ ? "retry directly, " - : "sleep " + std::to_string(nextSleepUS) + " us, ") - << *reqCtx_ << ", cntl status = " << cntlstatus - << ", response status = " - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast(rpcstatus)) - << ", retried times = " << reqDone->GetRetriedTimes() - << ", IO id = " << reqDone->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << "Rpc failed " + << (retryDirectly_ + ? "retry directly, " + : "sleep " + std::to_string(nextSleepUS) + " us, ") + << *reqCtx_ << ", cntl status = " << cntlstatus + << ", response status = " + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast( + rpcstatus)) + << ", retried times = " << reqDone->GetRetriedTimes() + << ", IO id = " << reqDone->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (nextSleepUS != 0) { bthread_usleep(nextSleepUS); @@ -134,8 +142,11 @@ uint64_t ClientClosure::OverLoadBackOff(uint64_t currentRetryTimes) { random_time -= nextsleeptime / 10; nextsleeptime += random_time; - nextsleeptime = std::min(nextsleeptime, failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT - nextsleeptime = std::max(nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT + nextsleeptime = + std::min(nextsleeptime, + failReqOpt_.chunkserverMaxRetrySleepIntervalUS); // NOLINT + nextsleeptime = std::max( + nextsleeptime, failReqOpt_.chunkserverOPRetryIntervalUS); // NOLINT return nextsleeptime; } @@ -153,10 +164,11 @@ uint64_t ClientClosure::TimeoutBackOff(uint64_t currentRetryTimes) { return nextTimeout; } -// 统一请求回调函数入口 -// 整体处理逻辑与之前相同 -// 针对不同的请求类型和返回状态码,进行相应的处理 -// 各子类需要实现SendRetryRequest,进行重试请求 +// Unified entry point for request callback functions. +// The overall processing logic remains the same as before. +// Specific handling is performed based on different request types +// and response status codes. +// Subclasses need to implement SendRetryRequest for retrying requests. void ClientClosure::Run() { std::unique_ptr selfGuard(this); std::unique_ptr cntlGuard(cntl_); @@ -176,80 +188,81 @@ void ClientClosure::Run() { needRetry = true; OnRpcFailed(); } else { - // 只要rpc正常返回,就清空超时计数器 - metaCache_->GetUnstableHelper().ClearTimeout( - chunkserverID_, chunkserverEndPoint_); + // As long as RPC returns normally, clear the timeout counter + metaCache_->GetUnstableHelper().ClearTimeout(chunkserverID_, + chunkserverEndPoint_); status_ = GetResponseStatus(); switch (status_) { - // 1. 
请求成功 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: - OnSuccess(); - break; - - // 2.1 不是leader - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: - MetricHelper::IncremRedirectRPCCount(fileMetric_, reqCtx_->optype_); - needRetry = true; - OnRedirected(); - break; - - // 2.2 Copyset不存在,大概率都是配置变更了 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: - needRetry = true; - OnCopysetNotExist(); - break; - - // 2.3 chunk not exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: - OnChunkNotExist(); - break; - - // 2.4 非法参数,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: - OnInvalidRequest(); - break; + // 1. Request successful + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS: + OnSuccess(); + break; + + // 2.1 Not the leader + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_REDIRECTED: + MetricHelper::IncremRedirectRPCCount(fileMetric_, + reqCtx_->optype_); + needRetry = true; + OnRedirected(); + break; - // 2.5 返回backward - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: - if (reqCtx_->optype_ == OpType::WRITE) { + // 2.2 Copyset does not exist, most likely due to configuration + // changes + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST: needRetry = true; - OnBackward(); - } else { - LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " return backward, " - << *reqCtx_ - << ", status=" << status_ + OnCopysetNotExist(); + break; + + // 2.3 Chunk does not exist; return directly without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST: + OnChunkNotExist(); + break; + + // 2.4 Illegal parameter; return directly without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST: + OnInvalidRequest(); + break; + + // 2.5 BACKWARD returned + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_BACKWARD: + if (reqCtx_->optype_ == OpType::WRITE) { + needRetry = true; + OnBackward(); + } else { + LOG(ERROR) + << OpTypeToString(reqCtx_->optype_) + << " return backward, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); + } + break; + + // 2.6 Chunk already exists; return directly without retrying + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: + OnChunkExist(); + break; + + case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: + OnEpochTooOld(); + break; + + default: + needRetry = true; + LOG(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed for UNKNOWN reason, " << *reqCtx_ << ", status=" + << curve::chunkserver::CHUNK_OP_STATUS_Name( + static_cast(status_)) << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); - } - break; - - // 2.6 返回chunk exist,直接返回,不用重试 - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_EXIST: - OnChunkExist(); - break; - - case CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD: - OnEpochTooOld(); - break; - - default: - needRetry = true; - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed for UNKNOWN reason, " << *reqCtx_ - << ", status=" - << curve::chunkserver::CHUNK_OP_STATUS_Name( - static_cast(status_)) - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side 
= " - << butil::endpoint2str(cntl_->remote_side()).c_str(); } } @@ -264,22 +277,22 @@ void ClientClosure::OnRpcFailed() { status_ = cntl_->ErrorCode(); - // 如果连接失败,再等一定时间再重试 + // If the connection fails, wait for a certain amount of time before trying + // again if (cntlstatus_ == brpc::ERPCTIMEDOUT) { - // 如果RPC超时, 对应的chunkserver超时请求次数+1 + // If RPC times out, the corresponding number of chunkserver timeout + // requests+1 metaCache_->GetUnstableHelper().IncreTimeout(chunkserverID_); MetricHelper::IncremTimeOutRPCCount(fileMetric_, reqCtx_->optype_); } - LOG_EVERY_SECOND(WARNING) << OpTypeToString(reqCtx_->optype_) - << " failed, error code: " - << cntl_->ErrorCode() - << ", error: " << cntl_->ErrorText() - << ", " << *reqCtx_ + LOG_EVERY_SECOND(WARNING) + << OpTypeToString(reqCtx_->optype_) + << " failed, error code: " << cntl_->ErrorCode() + << ", error: " << cntl_->ErrorText() << ", " << *reqCtx_ << ", retried times = " << reqDone_->GetRetriedTimes() << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " + << ", request id = " << reqCtx_->id_ << ", remote side = " << butil::endpoint2str(cntl_->remote_side()).c_str(); ProcessUnstableState(); @@ -291,26 +304,27 @@ void ClientClosure::ProcessUnstableState() { chunkserverID_, chunkserverEndPoint_); switch (state) { - case UnstableState::ServerUnstable: { - std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); - int ret = metaCache_->SetServerUnstable(ip); - if (ret != 0) { - LOG(WARNING) << "Set server(" << ip << ") unstable failed, " - << "now set chunkserver(" << chunkserverID_ << ") unstable"; + case UnstableState::ServerUnstable: { + std::string ip = butil::ip2str(chunkserverEndPoint_.ip).c_str(); + int ret = metaCache_->SetServerUnstable(ip); + if (ret != 0) { + LOG(WARNING) + << "Set server(" << ip << ") unstable failed, " + << "now set chunkserver(" << chunkserverID_ << ") unstable"; + metaCache_->SetChunkserverUnstable(chunkserverID_); + } + break; + } + case UnstableState::ChunkServerUnstable: { metaCache_->SetChunkserverUnstable(chunkserverID_); + break; } - break; - } - case UnstableState::ChunkServerUnstable: { - metaCache_->SetChunkserverUnstable(chunkserverID_); - break; - } - case UnstableState::NoUnstable: { - RefreshLeader(); - break; - } - default: - break; + case UnstableState::NoUnstable: { + RefreshLeader(); + break; + } + default: + break; } } @@ -319,64 +333,58 @@ void ClientClosure::OnSuccess() { auto duration = cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkNotExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " not exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " not exists, " + << *reqCtx_ << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); auto duration = 
cntl_->latency_us(); MetricHelper::LatencyRecord(fileMetric_, duration, reqCtx_->optype_); - MetricHelper::IncremRPCQPSCount( - fileMetric_, reqCtx_->rawlength_, reqCtx_->optype_); + MetricHelper::IncremRPCQPSCount(fileMetric_, reqCtx_->rawlength_, + reqCtx_->optype_); } void ClientClosure::OnChunkExist() { reqDone_->SetFailed(status_); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " exists, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " exists, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnEpochTooOld() { reqDone_->SetFailed(status_); LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " epoch too old, reqCtx: " << *reqCtx_ - << ", status: " << status_ - << ", retried times: " << reqDone_->GetRetriedTimes() - << ", IO id: " << reqDone_->GetIOTracker()->GetID() - << ", request id: " << reqCtx_->id_ - << ", remote side: " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " epoch too old, reqCtx: " << *reqCtx_ + << ", status: " << status_ + << ", retried times: " << reqDone_->GetRetriedTimes() + << ", IO id: " << reqDone_->GetIOTracker()->GetID() + << ", request id: " << reqCtx_->id_ << ", remote side: " + << butil::endpoint2str(cntl_->remote_side()).c_str(); } void ClientClosure::OnRedirected() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (response_->has_redirect() ? response_->redirect() : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (response_->has_redirect() ? 
response_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (response_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(response_->redirect()); @@ -390,13 +398,11 @@ void ClientClosure::OnRedirected() { void ClientClosure::OnCopysetNotExist() { LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " copyset not exists, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); RefreshLeader(); } @@ -443,23 +449,20 @@ void ClientClosure::RefreshLeader() { << ", IO id = " << reqDone_->GetIOTracker()->GetID() << ", request id = " << reqCtx_->id_; } else { - // 如果refresh leader获取到了新的leader信息 - // 则重试之前不进行睡眠 + // If refresh leader obtains new leader information, + // retry without sleeping before. retryDirectly_ = (leaderId != chunkserverID_); } } void ClientClosure::OnBackward() { const auto latestSn = metaCache_->GetLatestFileSn(); - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " return BACKWARD, " - << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " return BACKWARD, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); reqCtx_->seq_ = latestSn; } @@ -467,38 +470,26 @@ void ClientClosure::OnBackward() { void ClientClosure::OnInvalidRequest() { reqDone_->SetFailed(status_); LOG(ERROR) << OpTypeToString(reqCtx_->optype_) - << " failed for invalid format, " << *reqCtx_ - << ", status=" << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + << " failed for invalid format, " << *reqCtx_ + << ", status=" << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); MetricHelper::IncremFailRPCCount(fileMetric_, reqCtx_->optype_); } void WriteChunkClosure::SendRetryRequest() { - client_->WriteChunk(reqCtx_->idinfo_, - reqCtx_->fileId_, - reqCtx_->epoch_, - reqCtx_->seq_, - reqCtx_->writeData_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->WriteChunk(reqCtx_->idinfo_, reqCtx_->fileId_, reqCtx_->epoch_, + reqCtx_->seq_, reqCtx_->writeData_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } -void WriteChunkClosure::OnSuccess() { - ClientClosure::OnSuccess(); -} +void WriteChunkClosure::OnSuccess() { ClientClosure::OnSuccess(); } void 
ReadChunkClosure::SendRetryRequest() { - client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - reqCtx_->sourceInfo_, - done_); + client_->ReadChunk(reqCtx_->idinfo_, reqCtx_->seq_, reqCtx_->offset_, + reqCtx_->rawlength_, reqCtx_->sourceInfo_, done_); } void ReadChunkClosure::OnSuccess() { @@ -516,9 +507,7 @@ void ReadChunkClosure::OnChunkNotExist() { void ReadChunkSnapClosure::SendRetryRequest() { client_->ReadChunkSnapshot(reqCtx_->idinfo_, reqCtx_->seq_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + reqCtx_->offset_, reqCtx_->rawlength_, done_); } void ReadChunkSnapClosure::OnSuccess() { @@ -528,10 +517,8 @@ void ReadChunkSnapClosure::OnSuccess() { } void DeleteChunkSnapClosure::SendRetryRequest() { - client_->DeleteChunkSnapshotOrCorrectSn( - reqCtx_->idinfo_, - reqCtx_->correctedSeq_, - done_); + client_->DeleteChunkSnapshotOrCorrectSn(reqCtx_->idinfo_, + reqCtx_->correctedSeq_, done_); } void GetChunkInfoClosure::SendRetryRequest() { @@ -548,17 +535,16 @@ void GetChunkInfoClosure::OnSuccess() { } void GetChunkInfoClosure::OnRedirected() { - LOG(WARNING) << OpTypeToString(reqCtx_->optype_) - << " redirected, " << *reqCtx_ - << ", status = " << status_ - << ", retried times = " << reqDone_->GetRetriedTimes() - << ", IO id = " << reqDone_->GetIOTracker()->GetID() - << ", request id = " << reqCtx_->id_ - << ", redirect leader is " - << (chunkinforesponse_->has_redirect() ? chunkinforesponse_->redirect() - : "empty") - << ", remote side = " - << butil::endpoint2str(cntl_->remote_side()).c_str(); + LOG(WARNING) << OpTypeToString(reqCtx_->optype_) << " redirected, " + << *reqCtx_ << ", status = " << status_ + << ", retried times = " << reqDone_->GetRetriedTimes() + << ", IO id = " << reqDone_->GetIOTracker()->GetID() + << ", request id = " << reqCtx_->id_ << ", redirect leader is " + << (chunkinforesponse_->has_redirect() + ? 
chunkinforesponse_->redirect() + : "empty") + << ", remote side = " + << butil::endpoint2str(cntl_->remote_side()).c_str(); if (chunkinforesponse_->has_redirect()) { int ret = UpdateLeaderWithRedirectInfo(chunkinforesponse_->redirect()); @@ -571,19 +557,14 @@ void GetChunkInfoClosure::OnRedirected() { } void CreateCloneChunkClosure::SendRetryRequest() { - client_->CreateCloneChunk(reqCtx_->idinfo_, - reqCtx_->location_, - reqCtx_->seq_, - reqCtx_->correctedSeq_, - reqCtx_->chunksize_, - done_); + client_->CreateCloneChunk(reqCtx_->idinfo_, reqCtx_->location_, + reqCtx_->seq_, reqCtx_->correctedSeq_, + reqCtx_->chunksize_, done_); } void RecoverChunkClosure::SendRetryRequest() { - client_->RecoverChunk(reqCtx_->idinfo_, - reqCtx_->offset_, - reqCtx_->rawlength_, - done_); + client_->RecoverChunk(reqCtx_->idinfo_, reqCtx_->offset_, + reqCtx_->rawlength_, done_); } int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { @@ -601,7 +582,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->UpdateLeader(lpId, cpId, leaderAddr.addr_); if (ret != 0) { LOG(WARNING) << "Update leader of copyset (" << lpId << ", " << cpId - << ") in metaCache fail"; + << ") in metaCache fail"; return -1; } @@ -609,7 +590,7 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { ret = metaCache_->GetLeader(lpId, cpId, &leaderId, &leaderEp); if (ret != 0) { LOG(INFO) << "Get leader of copyset (" << lpId << ", " << cpId - << ") from metaCache fail"; + << ") from metaCache fail"; return -1; } @@ -617,5 +598,5 @@ int ClientClosure::UpdateLeaderWithRedirectInfo(const std::string& leaderInfo) { return 0; } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/src/client/chunk_closure.h b/src/client/chunk_closure.h index f5d9acd220..eb7e42221a 100644 --- a/src/client/chunk_closure.h +++ b/src/client/chunk_closure.h @@ -23,15 +23,16 @@ #ifndef SRC_CLIENT_CHUNK_CLOSURE_H_ #define SRC_CLIENT_CHUNK_CLOSURE_H_ -#include #include #include +#include + #include #include #include "proto/chunk.pb.h" -#include "src/client/client_config.h" #include "src/client/client_common.h" +#include "src/client/client_config.h" #include "src/client/client_metric.h" #include "src/client/request_closure.h" #include "src/common/math_util.h" @@ -42,15 +43,16 @@ namespace client { using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::ChunkResponse; using curve::chunkserver::GetChunkInfoResponse; -using ::google::protobuf::Message; using ::google::protobuf::Closure; +using ::google::protobuf::Message; class MetaCache; class CopysetClient; /** - * ClientClosure,负责保存Rpc上下文, - * 包含cntl和response已经重试次数 + * ClientClosure, responsible for maintaining the Rpc context, + * including the control (cntl) and response, as well as the + * retry count. 
*/ class ClientClosure : public Closure { public: ClientClosure(CopysetClient* client, Closure* done) : client_(client), done_(done) {} virtual ~ClientClosure() = default; - void SetCntl(brpc::Controller* cntl) { - cntl_ = cntl; - } + void SetCntl(brpc::Controller* cntl) { cntl_ = cntl; } virtual void SetResponse(Message* response) { response_.reset(static_cast(response)); } - void SetChunkServerID(ChunkServerID csid) { - chunkserverID_ = csid; - } + void SetChunkServerID(ChunkServerID csid) { chunkserverID_ = csid; } - ChunkServerID GetChunkServerID() const { - return chunkserverID_; - } + ChunkServerID GetChunkServerID() const { return chunkserverID_; } void SetChunkServerEndPoint(const butil::EndPoint& endPoint) { chunkserverEndPoint_ = endPoint; } - EndPoint GetChunkServerEndPoint() const { - return chunkserverEndPoint_; - } + EndPoint GetChunkServerEndPoint() const { return chunkserverEndPoint_; } - // 统一Run函数入口 + // Unified Run() entry point void Run() override; - // 重试请求 + // Retry the request void OnRetry(); - // Rpc Failed 处理函数 + // RPC failure handler void OnRpcFailed(); - // 返回成功 处理函数 + // Success handler virtual void OnSuccess(); - // 返回重定向 处理函数 + // Redirect handler virtual void OnRedirected(); - // copyset不存在 + // copyset does not exist void OnCopysetNotExist(); - // 返回backward + // BACKWARD returned void OnBackward(); - // 返回chunk不存在 处理函数 + // Chunk-not-exist handler virtual void OnChunkNotExist(); - // 返回chunk存在 处理函数 + // Chunk-already-exists handler void OnChunkExist(); // handle epoch too old void OnEpochTooOld(); - // 非法参数 + // Illegal parameter void OnInvalidRequest(); - // 发送重试请求 + // Send the retry request virtual void SendRetryRequest() = 0; - // 获取response返回的状态码 + // Obtain the status code returned by the response virtual CHUNK_OP_STATUS GetResponseStatus() const { return response_->status(); } @@ -132,45 +126,43 @@ class ClientClosure : public Closure { SetBackoffParam(); DVLOG(9) << "Client clousre conf info: " - << "chunkserverOPRetryIntervalUS = " - << failReqOpt_.chunkserverOPRetryIntervalUS - << ", chunkserverOPMaxRetry = " - << failReqOpt_.chunkserverOPMaxRetry; + << "chunkserverOPRetryIntervalUS = " + << failReqOpt_.chunkserverOPRetryIntervalUS + << ", chunkserverOPMaxRetry = " + << failReqOpt_.chunkserverOPMaxRetry; } - Closure* GetClosure() const { - return done_; - } + Closure* GetClosure() const { return done_; } - // 测试使用,设置closure - void SetClosure(Closure* done) { - done_ = done; - } + // For testing: set the closure + void SetClosure(Closure* done) { done_ = done; } - static FailureRequestOption GetFailOpt() { - return failReqOpt_; - } + static FailureRequestOption GetFailOpt() { return failReqOpt_; } /** - * 在重试之前根据返回值进行预处理 - * 场景1: rpc timeout,那么这时候会指数增加当前rpc的超时时间,然后直接进行重试 - * 场景2:底层OVERLOAD,那么需要在重试之前睡眠一段时间,睡眠时间根据重试次数指数增长 - * @param: rpcstatue为rpc返回值 - * @param: cntlstatus为本次rpc controller返回值 + * Preprocess based on the return value before retrying. + * Scenario 1: RPC timeout results in exponentially increasing + * the current RPC timeout and retrying immediately. + * Scenario 2: Underlying OVERLOAD condition requires sleeping + * for a period of time before retrying, where the sleep time + * exponentially increases based on the retry count. + * @param rpcstatus: Return value of the RPC. + * @param cntlstatus: Return value of the RPC controller for this call. 
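+ * A purely illustrative example (assumed numbers, not the shipped + * defaults): with a 1s base RPC timeout, scenario 1 retries with timeouts + * of roughly 1s, 2s, 4s, ... up to the configured ceiling; with a 100ms + * base sleep, scenario 2 sleeps roughly 100ms, 200ms, 400ms, ... (plus + * jitter) before retrying, capped at chunkserverMaxRetrySleepIntervalUS. 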
     */
    void PreProcessBeforeRetry(int rpcstatue, int cntlstatus);
    /**
-     * 底层chunkserver overload之后需要根据重试次数进行退避
-     * @param: currentRetryTimes为当前已重试的次数
-     * @return: 返回当前的需要睡眠的时间
+     * After the underlying chunkserver is overloaded, backoff is required
+     * based on the retry count.
+     * @param currentRetryTimes: The current number of retries.
+     * @return: Returns the duration to sleep before the next retry.
     */
    static uint64_t OverLoadBackOff(uint64_t currentRetryTimes);

    /**
-     * rpc timeout之后需要根据重试次数进行退避
-     * @param: currentRetryTimes为当前已重试的次数
-     * @return: 返回下一次RPC 超时时间
+     * After an RPC timeout, backoff is required based on the retry count.
+     * @param currentRetryTimes: The current number of retries.
+     * @return: Returns the next RPC timeout duration.
     */
    static uint64_t TimeoutBackOff(uint64_t currentRetryTimes);

@@ -207,32 +199,35 @@
    void RefreshLeader();

-    static FailureRequestOption failReqOpt_;
-
-    brpc::Controller* cntl_;
-    std::unique_ptr<ChunkResponse> response_;
-    CopysetClient* client_;
-    Closure* done_;
-    // 这里保存chunkserverID,是为了区别当前这个rpc是发给哪个chunkserver的
-    // 这样方便在rpc closure里直接找到,当前是哪个chunkserver返回的失败
-    ChunkServerID chunkserverID_;
-    butil::EndPoint chunkserverEndPoint_;
-
-    // 记录当前请求的相关信息
-    MetaCache* metaCache_;
-    RequestClosure* reqDone_;
-    FileMetric* fileMetric_;
-    RequestContext* reqCtx_;
-    ChunkIDInfo chunkIdInfo_;
-
-    // 发送重试请求前是否睡眠
+    static FailureRequestOption failReqOpt_;
+
+    brpc::Controller* cntl_;
+    std::unique_ptr<ChunkResponse> response_;
+    CopysetClient* client_;
+    Closure* done_;
+
+    // Saving the Chunkserver ID here is to distinguish which Chunkserver
+    // this RPC is sent to. This makes it convenient to identify, within
+    // the RPC closure, which Chunkserver returned a failure.
+
+    ChunkServerID chunkserverID_;
+    butil::EndPoint chunkserverEndPoint_;
+
+    // Record relevant information for the current request
+    MetaCache* metaCache_;
+    RequestClosure* reqDone_;
+    FileMetric* fileMetric_;
+    RequestContext* reqCtx_;
+    ChunkIDInfo chunkIdInfo_;
+
+    // Whether to sleep before sending a retry request
    bool retryDirectly_ = false;

-    // response 状态码
-    int status_;
+    // response status code
+    int status_;

-    // rpc 状态码
-    int cntlstatus_;
+    // rpc status code
+    int cntlstatus_;
};

class WriteChunkClosure : public ClientClosure {
@@ -308,7 +303,7 @@ class RecoverChunkClosure : public ClientClosure {
    void SendRetryRequest() override;
};

-} // namespace client
-} // namespace curve
+}  // namespace client
+}  // namespace curve

#endif  // SRC_CLIENT_CHUNK_CLOSURE_H_
diff --git a/src/common/authenticator.h b/src/common/authenticator.h
index 7d9ba319c3..f52560379a 100644
--- a/src/common/authenticator.h
+++ b/src/common/authenticator.h
@@ -30,31 +30,30 @@ namespace common {
class Authenticator {
 public:
    /**
-     * bref: 获取要进行签名的字符串
-     * @param: date, 当前的时间
-     * @param: owner, 文件所有者
-     * @return: 返回需要进行加密的字符串
+     * brief: Get the string to be signed
+     * @param: date, current time
+     * @param: owner, file owner
+     * @return: Returns the string to be encrypted
     */
    static std::string GetString2Signature(uint64_t date,
-                                            const std::string& owner);
+                                           const std::string& owner);

    /**
-     * bref: 为字符串计算签名
-     * @param: String2Signature, 需要进行签名计算的字符串
-     * @param: secretKey, 为计算的秘钥
-     * @return: 返回需要进行签名过后的字符串
+     * brief: Calculate the signature for a string
+     * @param: String2Signature, the string that requires signature calculation
+     * @param: secretKey, the secret key used for the calculation
+     * @return: Returns the signed string
     */
    static std::string CalcString2Signature(const std::string& String2Signature,
                                            const
std::string& secretKey); private: - static int HMacSha256(const void* key, int key_size, - const void* data, int data_size, - void* digest); + static int HMacSha256(const void* key, int key_size, const void* data, + int data_size, void* digest); - static std::string Base64(const unsigned char *src, size_t sz); + static std::string Base64(const unsigned char* src, size_t sz); }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve #endif // SRC_COMMON_AUTHENTICATOR_H_ diff --git a/src/common/bitmap.cpp b/src/common/bitmap.cpp index dbff32702c..50d33181d9 100644 --- a/src/common/bitmap.cpp +++ b/src/common/bitmap.cpp @@ -20,20 +20,22 @@ * Author: yangyaokai */ +#include "src/common/bitmap.h" + #include #include -#include + #include -#include "src/common/bitmap.h" +#include namespace curve { namespace common { -std::string BitRangeVecToString(const std::vector &ranges) { +std::string BitRangeVecToString(const std::vector& ranges) { std::stringstream ss; for (uint32_t i = 0; i < ranges.size(); ++i) { if (i != 0) { - ss << ", "; + ss << ", "; } ss << "(" << ranges[i].beginIndex << "," << ranges[i].endIndex << ")"; } @@ -44,14 +46,14 @@ const uint32_t Bitmap::NO_POS = 0xFFFFFFFF; Bitmap::Bitmap(uint32_t bits) : bits_(bits) { int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memset(bitmap_, 0, count); } Bitmap::Bitmap(uint32_t bits, const char* bitmap) : bits_(bits) { int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; if (bitmap != nullptr) { memcpy(bitmap_, bitmap, count); @@ -64,7 +66,7 @@ Bitmap::Bitmap(uint32_t bits, char* bitmap, bool transfer) : bits_(bits) { int count = unitCount(); if (!transfer) { - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; if (bitmap != nullptr) { memcpy(bitmap_, bitmap, count); @@ -87,18 +89,17 @@ Bitmap::~Bitmap() { Bitmap::Bitmap(const Bitmap& bitmap) { bits_ = bitmap.Size(); int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memcpy(bitmap_, bitmap.GetBitmap(), count); } -Bitmap& Bitmap::operator = (const Bitmap& bitmap) { - if (this == &bitmap) - return *this; +Bitmap& Bitmap::operator=(const Bitmap& bitmap) { + if (this == &bitmap) return *this; delete[] bitmap_; bits_ = bitmap.Size(); int count = unitCount(); - bitmap_ = new(std::nothrow) char[count]; + bitmap_ = new (std::nothrow) char[count]; CHECK(bitmap_ != nullptr) << "allocate bitmap failed."; memcpy(bitmap_, bitmap.GetBitmap(), count); return *this; @@ -118,23 +119,19 @@ Bitmap& Bitmap::operator=(Bitmap&& other) noexcept { return *this; } -bool Bitmap::operator == (const Bitmap& bitmap) const { - if (bits_ != bitmap.Size()) - return false; +bool Bitmap::operator==(const Bitmap& bitmap) const { + if (bits_ != bitmap.Size()) return false; return 0 == memcmp(bitmap_, bitmap.GetBitmap(), unitCount()); } -bool Bitmap::operator != (const Bitmap& bitmap) const { +bool Bitmap::operator!=(const Bitmap& bitmap) const { return !(*this == bitmap); } -void Bitmap::Set() { - memset(bitmap_, 0xff, unitCount()); -} +void Bitmap::Set() { memset(bitmap_, 0xff, unitCount()); } void Bitmap::Set(uint32_t index) { - if (index < bits_) - 
bitmap_[indexOfUnit(index)] |= mask(index);
+    if (index < bits_) bitmap_[indexOfUnit(index)] |= mask(index);
}

void Bitmap::Set(uint32_t startIndex, uint32_t endIndex) {
@@ -144,13 +141,10 @@
    }
}

-void Bitmap::Clear() {
-    memset(bitmap_, 0, unitCount());
-}
+void Bitmap::Clear() { memset(bitmap_, 0, unitCount()); }

void Bitmap::Clear(uint32_t index) {
-    if (index < bits_)
-        bitmap_[indexOfUnit(index)] &= ~mask(index);
+    if (index < bits_) bitmap_[indexOfUnit(index)] &= ~mask(index);
}

void Bitmap::Clear(uint32_t startIndex, uint32_t endIndex) {
@@ -169,106 +163,93 @@ bool Bitmap::Test(uint32_t index) const {

uint32_t Bitmap::NextSetBit(uint32_t index) const {
    for (; index < bits_; ++index) {
-        if (Test(index))
-            break;
+        if (Test(index)) break;
    }
-    if (index >= bits_)
-        index = NO_POS;
+    if (index >= bits_) index = NO_POS;
    return index;
}

uint32_t Bitmap::NextSetBit(uint32_t startIndex, uint32_t endIndex) const {
    uint32_t index = startIndex;
-    // bitmap中最后一个bit的index值
+    // The index value of the last bit in the bitmap
    uint32_t lastIndex = bits_ - 1;
-    // endIndex值不能超过lastIndex
-    if (endIndex > lastIndex)
-        endIndex = lastIndex;
+    // The endIndex value cannot exceed lastIndex
+    if (endIndex > lastIndex) endIndex = lastIndex;
    for (; index <= endIndex; ++index) {
-        if (Test(index))
-            break;
+        if (Test(index)) break;
    }
-    if (index > endIndex)
-        index = NO_POS;
+    if (index > endIndex) index = NO_POS;
    return index;
}

uint32_t Bitmap::NextClearBit(uint32_t index) const {
    for (; index < bits_; ++index) {
-        if (!Test(index))
-            break;
+        if (!Test(index)) break;
    }
-    if (index >= bits_)
-        index = NO_POS;
+    if (index >= bits_) index = NO_POS;
    return index;
}

uint32_t Bitmap::NextClearBit(uint32_t startIndex, uint32_t endIndex) const {
    uint32_t index = startIndex;
    uint32_t lastIndex = bits_ - 1;
-    // endIndex值不能超过lastIndex
-    if (endIndex > lastIndex)
-        endIndex = lastIndex;
+    // The endIndex value cannot exceed lastIndex
+    if (endIndex > lastIndex) endIndex = lastIndex;
    for (; index <= endIndex; ++index) {
-        if (!Test(index))
-            break;
+        if (!Test(index)) break;
    }
-    if (index > endIndex)
-        index = NO_POS;
+    if (index > endIndex) index = NO_POS;
    return index;
}

-void Bitmap::Divide(uint32_t startIndex,
-                    uint32_t endIndex,
+void Bitmap::Divide(uint32_t startIndex, uint32_t endIndex,
                    vector<BitRange>* clearRanges,
                    vector<BitRange>* setRanges) const {
-    // endIndex的值不能小于startIndex
-    if (endIndex < startIndex)
-        return;
+    // The value of endIndex cannot be less than startIndex
+    if (endIndex < startIndex) return;

-    // endIndex值不能超过lastIndex
+    // The endIndex value cannot exceed lastIndex
    uint32_t lastIndex = bits_ - 1;
-    if (endIndex > lastIndex)
-        endIndex = lastIndex;
+    if (endIndex > lastIndex) endIndex = lastIndex;

    BitRange clearRange;
    BitRange setRange;
    vector<BitRange> tmpClearRanges;
    vector<BitRange> tmpSetRanges;
-    // 下一个位为0的index
+    // Index of the next bit that is 0
    uint32_t nextClearIndex;
-    // 下一个位为1的index
+    // Index of the next bit that is 1
    uint32_t nextSetIndex;

-    // 划分所有range
+    // Divide all ranges
    while (startIndex != NO_POS) {
        nextClearIndex = NextClearBit(startIndex, endIndex);
-        // 1.存放当前clear index之前的 set range
-        // nextClearIndex如果等于startIndex说明前面没有 set range
+        // 1. Store the set range before the current clear index
+        // If nextClearIndex is equal to startIndex, it indicates that there
+        // is no set range before it
        if (nextClearIndex != startIndex) {
            setRange.beginIndex = startIndex;
-            // nextClearIndex等于NO_POS说明已经找到末尾
-            // 最后一块连续区域是 set range
-            setRange.endIndex = nextClearIndex == NO_POS
-                                ? endIndex
-                                : nextClearIndex - 1;
+            // If nextClearIndex equals NO_POS, the end has been reached
+            // and the last continuous region is a set range
+            setRange.endIndex =
+                nextClearIndex == NO_POS ? endIndex : nextClearIndex - 1;
            tmpSetRanges.push_back(setRange);
        }
-        if (nextClearIndex == NO_POS)
-            break;
+        if (nextClearIndex == NO_POS) break;

        nextSetIndex = NextSetBit(nextClearIndex, endIndex);
-        // 2.存放当前set index之前的 clear range
-        // 能到这一步说明前面肯定存在clear range,所以不用像第1步一样做判断
+        // 2. Store the clear range before the current set index
+        // Reaching this step means a clear range must exist before it, so
+        // there is no need for the check made in step 1
        clearRange.beginIndex = nextClearIndex;
-        clearRange.endIndex = nextSetIndex == NO_POS
-                              ? endIndex
-                              : nextSetIndex - 1;
+        clearRange.endIndex =
+            nextSetIndex == NO_POS ? endIndex : nextSetIndex - 1;
        tmpClearRanges.push_back(clearRange);
        startIndex = nextSetIndex;
    }

-    // 根据参数中的clearRanges和setRanges指针是否为空返回结果
+    // Return results depending on whether the clearRanges and setRanges
+    // pointers in the parameters are null
    if (clearRanges != nullptr) {
        *clearRanges = std::move(tmpClearRanges);
    }
@@ -277,13 +258,9 @@
    }
}

-uint32_t Bitmap::Size() const {
-    return bits_;
-}
+uint32_t Bitmap::Size() const { return bits_; }

-const char* Bitmap::GetBitmap() const {
-    return bitmap_;
-}
+const char* Bitmap::GetBitmap() const { return bitmap_; }

}  // namespace common
}  // namespace curve
diff --git a/src/common/bitmap.h b/src/common/bitmap.h
index e7a0e1270d..370c55e070 100644
--- a/src/common/bitmap.h
+++ b/src/common/bitmap.h
@@ -24,8 +24,9 @@
#define SRC_COMMON_BITMAP_H_

#include
-#include
+
#include
+#include

namespace curve {
namespace common {
@@ -36,30 +37,30 @@ const int BITMAP_UNIT_SIZE = 8;
const int ALIGN_FACTOR = 3;  // 2 ^ ALIGN_FACTOR = BITMAP_UNIT_SIZE

/**
- * 表示bitmap中的一段连续区域,为闭区间
+ * Represents a continuous region in a bitmap, as a closed interval
 */
struct BitRange {
-    // 连续区域起始位置在bitmap中的索引
+    // Index of the starting position of a continuous region in the bitmap
    uint32_t beginIndex;
-    // 连续区域结束位置在bitmap中的索引
+    // Index of the end position of a continuous region in the bitmap
    uint32_t endIndex;
};
-
-std::string BitRangeVecToString(const std::vector<BitRange> &ranges);
+std::string BitRangeVecToString(const std::vector<BitRange>& ranges);

class Bitmap {
 public:
    /**
-     * 新建bitmap时的构造函数
-     * @param bits: 要构造的bitmap的位数
+     * Constructor for creating a new bitmap
+     * @param bits: The number of bits in the bitmap to construct
     */
    explicit Bitmap(uint32_t bits);

    /**
-     * 从已有的快照文件初始化时的构造函数
-     * 构造函数内部会再new一个新的bitmap,然后从参数中的bitmap memcpy过去
-     * @param bits: bitmap的位数
-     * @param bitmap: 外部提供的用于初始化的bitmap
+     * Constructor for initializing from an existing snapshot file
+     * The constructor allocates a new bitmap internally and then copies
+     * (memcpy) from the bitmap passed in the parameters
+     * @param bits: Number of bits in the bitmap
+     * @param bitmap: An externally provided bitmap used for initialization
     */
    explicit Bitmap(uint32_t bits, const char* bitmap);

@@ -70,142 +71,158 @@ class Bitmap {
    ~Bitmap();

    /**
-     * 拷贝构造,使用深拷贝
-     * @param bitmap:从该对象拷贝内容
+     * Copy constructor, using deep copy
+     * @param bitmap: Copy content from this object
     */
    Bitmap(const Bitmap& bitmap);
    /**
-     * 赋值函数,使用深拷贝
-     * @param bitmap:从该对象拷贝内容
-     * @reutrn:返回拷贝后对象引用
+     * Assignment operator, using deep copy
+     * @param bitmap: Copy content from this object
+     * @return: Returns a reference to the copied object
     */
-    Bitmap& operator = (const Bitmap& bitmap);
+    Bitmap& operator=(const Bitmap& bitmap);

    Bitmap(Bitmap&& other) noexcept;
    Bitmap& operator=(Bitmap&& other) noexcept;

    /**
-     * 比较两个bitmap是否相同
-     * @param bitmap:待比较的bitmap
-     * @return:如果相同返回true,如果不同返回false
+     * Compare whether two bitmaps are the same
+     * @param bitmap: Bitmap to be compared
+     * @return: Returns true if the same, false if different
     */
-    bool operator == (const Bitmap& bitmap) const;
+    bool operator==(const Bitmap& bitmap) const;

    /**
-     * 比较两个bitmap是否不同
-     * @param bitmap:待比较的bitmap
-     * @return:如果不同返回true,如果相同返回false
+     * Compare whether two bitmaps are different
+     * @param bitmap: Bitmap to be compared
+     * @return: Returns true if different, false if the same
     */
-    bool operator != (const Bitmap& bitmap) const;
+    bool operator!=(const Bitmap& bitmap) const;

    /**
-     * 将所有位置1
+     * Set all bits to 1
     */
    void Set();

    /**
-     * 将指定位置1
-     * @param index: 指定位的位置
+     * Set the bit at the specified position to 1
+     * @param index: Position of the bit
     */
    void Set(uint32_t index);

    /**
-     * 将指定范围的位置为1
-     * @param startIndex: 范围起始位置,包括此位置
-     * @param endIndex: 范围结束位置,包括此位置
+     * Set all bits in the specified range to 1
+     * @param startIndex: The starting position of the range, inclusive
+     * @param endIndex: The end position of the range, inclusive
     */
    void Set(uint32_t startIndex, uint32_t endIndex);

    /**
-     * 将所有位置0
+     * Clear all bits to 0
     */
    void Clear();

    /**
-     * 将指定位置0
-     * @param index: 指定位的位置
+     * Clear the bit at the specified position to 0
+     * @param index: Position of the bit
     */
    void Clear(uint32_t index);

    /**
-     * 将指定范围的位置为0
-     * @param startIndex: 范围起始位置,包括此位置
-     * @param endIndex: 范围结束位置,包括此位置
+     * Clear all bits in the specified range to 0
+     * @param startIndex: The starting position of the range, inclusive
+     * @param endIndex: The end position of the range, inclusive
     */
    void Clear(uint32_t startIndex, uint32_t endIndex);

    /**
-     * 获取指定位置位的状态
-     * @param index: 指定位的位置
-     * @return: true表示当前位状态为1,false表示为0
+     * Get the state of the bit at the specified position
+     * @param index: Position of the bit
+     * @return: true if the bit is 1, false if it is 0
     */
    bool Test(uint32_t index) const;

    /**
-     * 获取指定位置及之后的首个位为1的位置
-     * @param index: 指定位的位置,包含此位置
-     * @return: 首个位为1的位置,如果不存在返回NO_POS
+     * Get the position of the first set (1) bit at or after the specified
+     * position
+     * @param index: Position of the bit, inclusive
+     * @return: Position of the first set bit; returns NO_POS if none exists
     */
    uint32_t NextSetBit(uint32_t index) const;

    /**
-     * 获取指定起始位置到结束位置之间的的首个位为1的位置
-     * @param startIndex: 起始位置,包含此位置
-     * @param endIndex: 结束位置,包含此位置
-     * @return: 首个位为1的位置,如果指定范围内不存在则返回NO_POS
+     * Get the position of the first set (1) bit between the specified start
+     * and end positions
+     * @param startIndex: The starting position, inclusive
+     * @param endIndex: The end position, inclusive
+     * @return: Position of the first set bit; returns NO_POS if none exists
+     * within the specified range
     */
    uint32_t NextSetBit(uint32_t startIndex, uint32_t endIndex) const;

    /**
-     * 获取指定位置及之后的首个位为0的位置
-     * @param index: 指定位的位置,包含此位置
-     * @return: 首个位为0的位置,如果不存在返回NO_POS
+     * Get the position of the first clear (0) bit at or after the specified
+     * position
+     * @param index: Position of the bit, inclusive
+     * @return: Position of the first clear bit; returns NO_POS if none exists
     */
    uint32_t NextClearBit(uint32_t index) const;

    /**
-     * 获取指定起始位置到结束位置之间的的首个位为0的位置
-     * @param startIndex: 起始位置,包含此位置
-     * @param endIndex: 结束位置,包含此位置
-     * @return: 首个位为0的位置,如果指定范围内不存在则返回NO_POS
+     * Get the position of the first clear (0) bit between the specified start
+     * and end positions
+     * @param startIndex: The starting position, inclusive
+     * @param endIndex: The end position, inclusive
+     * @return: Position of the first clear bit; returns NO_POS if none exists
+     * within the specified range
     */
    uint32_t NextClearBit(uint32_t startIndex, uint32_t endIndex) const;

    /**
-     * 将bitmap的指定区域分割成若干连续区域,划分依据为位状态,连续区域内的位状态一致
-     * 例如:00011100会被划分为三个区域,[0,2]、[3,5]、[6,7]
-     * @param startIndex: 指定区域的起始索引
-     * @param endIndex: 指定范围的结束索引
-     * @param clearRanges: 存放位状态为0的连续区域的向量,可以指定为nullptr
-     * @param setRanges: 存放位状态为1的连续区域的向量,可以指定为nullptr
-     */
-    void Divide(uint32_t startIndex,
-                uint32_t endIndex,
+     * Divide the specified area of the bitmap into several continuous regions
+     * according to bit state, so that the bit state is consistent within each
+     * region. For example, 00011100 will be divided into three regions:
+     * [0,2], [3,5], [6,7]
+     * @param startIndex: The starting index of the specified region
+     * @param endIndex: The end index of the specified range
+     * @param clearRanges: Vector that stores the continuous regions whose bit
+     * state is 0; may be specified as nullptr
+     * @param setRanges: Vector that stores the continuous regions whose bit
+     * state is 1; may be specified as nullptr
+     */
+    void Divide(uint32_t startIndex, uint32_t endIndex,
                vector<BitRange>* clearRanges,
                vector<BitRange>* setRanges) const;

    /**
-     * bitmap的有效位数
-     * @return: 返回位数
+     * Number of valid bits in the bitmap
+     * @return: Returns the number of bits
     */
    uint32_t Size() const;

    /**
-     * 获取bitmap的内存指针,用于持久化bitmap
-     * @return: bitmap的内存指针
+     * Get the memory pointer of the bitmap, used for persisting the bitmap
+     * @return: Memory pointer to the bitmap
     */
    const char* GetBitmap() const;

 private:
-    // bitmap的字节数
+    // Number of bytes in the bitmap
    int unitCount() const {
-        // 同 (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE
+        // Same as (bits_ + BITMAP_UNIT_SIZE - 1) / BITMAP_UNIT_SIZE
        return (bits_ + BITMAP_UNIT_SIZE - 1) >> ALIGN_FACTOR;
    }
-    // 指定位置的bit在其所在字节中的偏移
+    // The offset of the bit at the specified position within its byte
    int indexOfUnit(uint32_t index) const {
-        // 同 index / BITMAP_UNIT_SIZE
+        // Same as index / BITMAP_UNIT_SIZE
        return index >> ALIGN_FACTOR;
    }
-    // 逻辑计算掩码值
+    // Compute the mask for the bit at the specified position
    char mask(uint32_t index) const {
-        int indexInUnit = index % BITMAP_UNIT_SIZE;
+        int indexInUnit = index % BITMAP_UNIT_SIZE;
        char mask = 0x01 << indexInUnit;
        return mask;
    }

 public:
-    // 表示不存在的位置,值为0xffffffff
+    // Represents a non-existent position, with a value of 0xffffffff
    static const uint32_t NO_POS;

 private:
-    uint32_t bits_;
-    char* bitmap_;
+    uint32_t bits_;
+    char* bitmap_;
};

}  // namespace common
diff --git a/src/common/channel_pool.h b/src/common/channel_pool.h
index 458baa33d3..fb549023e9 100644
--- a/src/common/channel_pool.h
+++ b/src/common/channel_pool.h
@@ -24,9 +24,10 @@
#define SRC_COMMON_CHANNEL_POOL_H_

#include
-#include
-#include
+
#include
+#include
+#include
#include

#include "src/common/concurrent/concurrent.h"
@@ -39,18 +40,18 @@ namespace common {
class ChannelPool {
 public:
    /**
-     * @brief 从channelMap获取或创建并Init到指定地址的channel
+     * @brief Obtain a channel from channelMap, or create one and Init it to
+     * the specified address
     *
-     * @param addr 对端的地址
-     * @param[out] channelPtr 到指定地址的channel
+     * @param addr The address of the peer
+     * @param[out] channelPtr The channel to the specified address
     *
-     * @return 成功返回0,失败返回-1
+     * @return returns 0 for success, -1 for failure
     */
-    int GetOrInitChannel(const std::string& addr,
-                         ChannelPtr* channelPtr);
+    int GetOrInitChannel(const std::string& addr, ChannelPtr* channelPtr);

    /**
-     * @brief 清空map
+     * @brief Clear the map
     */
    void Clear();

@@ -62,5 +63,4 @@ class ChannelPool {
}  // namespace common
}  // namespace curve

-#endif  // SRC_COMMON_CHANNEL_POOL_H_
-
+#endif  // SRC_COMMON_CHANNEL_POOL_H_
diff --git a/src/common/concurrent/bounded_blocking_queue.h b/src/common/concurrent/bounded_blocking_queue.h
index 56c59fcfc1..7d8449d812 100644
--- a/src/common/concurrent/bounded_blocking_queue.h
+++ b/src/common/concurrent/bounded_blocking_queue.h
@@ -23,12 +23,12 @@
#ifndef SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_
#define SRC_COMMON_CONCURRENT_BOUNDED_BLOCKING_QUEUE_H_

+#include
#include
+#include  //NOLINT
#include
-#include  //NOLINT
#include
-#include  //NOLINT
-#include
+#include  //NOLINT
#include

#include "src/common/uncopyable.h"
@@ -36,18 +36,17 @@ namespace curve {
namespace common {

-template
+template
class BBQItem {
 public:
-    explicit BBQItem(const T &t, bool stop = false)
-        : item_(t) {
+    explicit BBQItem(const T& t, bool stop = false) : item_(t) {
        stop_.store(stop, std::memory_order_release);
    }
-    BBQItem(const BBQItem &bbqItem) {
+    BBQItem(const BBQItem& bbqItem) {
        item_ = bbqItem.item_;
        stop_.store(bbqItem.stop_, std::memory_order_release);
    }
-    BBQItem &operator=(const BBQItem &bbqItem) {
+    BBQItem& operator=(const BBQItem& bbqItem) {
        if (&bbqItem == this) {
            return *this;
        }
@@ -56,13 +55,9 @@ class BBQItem {
        return *this;
    }

-    bool IsStop() const {
-        return stop_.load(std::memory_order_acquire);
-    }
+    bool IsStop() const { return stop_.load(std::memory_order_acquire); }

-    T Item() {
-        return item_;
-    }
+    T Item() { return item_; }

 private:
    T item_;
@@ -70,18 +65,13 @@
};

/**
- * 有 capacity 限制的阻塞队列,线程安全
+ * Blocking queue with a capacity limit; thread safe
 */
-template
+template
class BoundedBlockingDeque : public Uncopyable {
 public:
    BoundedBlockingDeque()
-        : mutex_(),
-          notEmpty_(),
-          notFull_(),
-          deque_(),
-          capacity_(0) {
-    }
+        : mutex_(), notEmpty_(), notFull_(), deque_(), capacity_(0) {}

    int Init(const int capacity) {
        if (0 >= capacity) {
@@ -91,7 +81,7 @@ class BoundedBlockingDeque : public Uncopyable {
        return 0;
    }

-    void PutBack(const T &x) {
+    void PutBack(const T& x) {
        std::unique_lock guard(mutex_);
        while (deque_.size() == capacity_) {
            notFull_.wait(guard);
@@ -100,7 +90,7 @@
        }
        deque_.push_back(x);
        notEmpty_.notify_one();
    }

-    void PutFront(const T &x) {
+    void PutFront(const T& x) {
        std::unique_lock guard(mutex_);
        while (deque_.size() == capacity_) {
            notFull_.wait(guard);
diff --git a/src/tools/chunkserver_client.cpp b/src/tools/chunkserver_client.cpp
index 69eb492d5c..40b1eb24aa 100644
--- a/src/tools/chunkserver_client.cpp
+++ b/src/tools/chunkserver_client.cpp
@@
-28,11 +28,10 @@ namespace curve { namespace tool { std::ostream& operator<<(std::ostream& os, const Chunk& chunk) { - uint64_t groupId = (static_cast(chunk.logicPoolId) << 32) | - chunk.copysetId; + uint64_t groupId = + (static_cast(chunk.logicPoolId) << 32) | chunk.copysetId; os << "logicalPoolId:" << chunk.logicPoolId - << ",copysetId:" << chunk.copysetId - << ",groupId:" << groupId + << ",copysetId:" << chunk.copysetId << ",groupId:" << groupId << ",chunkId:" << chunk.chunkId; return os; } @@ -40,8 +39,8 @@ std::ostream& operator<<(std::ostream& os, const Chunk& chunk) { int ChunkServerClient::Init(const std::string& csAddr) { csAddr_ = csAddr; if (channel_.Init(csAddr.c_str(), nullptr) != 0) { - std::cout << "Init channel to chunkserver: " << csAddr - << " failed!" << std::endl; + std::cout << "Init channel to chunkserver: " << csAddr << " failed!" + << std::endl; return -1; } return 0; @@ -69,7 +68,7 @@ int ChunkServerClient::GetRaftStatus(butil::IOBuf* iobuf) { } retryTimes++; } - // 只打最后一次失败的原因 + // Outputs only the reason for the last failure. std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -97,9 +96,8 @@ bool ChunkServerClient::CheckChunkServerOnline() { return false; } -int ChunkServerClient::GetCopysetStatus( - const CopysetStatusRequest& request, - CopysetStatusResponse* response) { +int ChunkServerClient::GetCopysetStatus(const CopysetStatusRequest& request, + CopysetStatusResponse* response) { brpc::Controller cntl; curve::chunkserver::CopysetService_Stub stub(&channel_); uint64_t retryTimes = 0; @@ -112,17 +110,16 @@ int ChunkServerClient::GetCopysetStatus( continue; } if (response->status() != - COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response->status() << std::endl; + << ", errCode: " << response->status() << std::endl; return -1; } else { return 0; } } - // 只打最后一次失败的原因 + // Outputs only the reason for the last failure. std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; @@ -151,15 +148,14 @@ int ChunkServerClient::GetChunkHash(const Chunk& chunk, if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS) { std::cout << "GetCopysetStatus fail, request: " << request.DebugString() - << ", errCode: " - << response.status() << std::endl; + << ", errCode: " << response.status() << std::endl; return -1; } else { *chunkHash = response.hash(); return 0; } } - // 只打最后一次失败的原因 + // Outputs only the reason for the last failure. 
std::cout << "Send RPC to chunkserver fail, error content: " << cntl.ErrorText() << std::endl; return -1; diff --git a/src/tools/chunkserver_client.h b/src/tools/chunkserver_client.h index 5945737ae8..3ef9282239 100644 --- a/src/tools/chunkserver_client.h +++ b/src/tools/chunkserver_client.h @@ -23,30 +23,30 @@ #ifndef SRC_TOOLS_CHUNKSERVER_CLIENT_H_ #define SRC_TOOLS_CHUNKSERVER_CLIENT_H_ -#include -#include #include +#include +#include -#include #include +#include #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/tools/curve_tool_define.h" +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::CopysetStatusRequest; using curve::chunkserver::CopysetStatusResponse; -using curve::chunkserver::COPYSET_OP_STATUS; using curve::chunkserver::GetChunkHashRequest; using curve::chunkserver::GetChunkHashResponse; -using curve::chunkserver::CHUNK_OP_STATUS; namespace curve { namespace tool { struct Chunk { - Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) : - logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} + Chunk(uint32_t poolId, uint32_t csId, uint64_t chunkId2) + : logicPoolId(poolId), copysetId(csId), chunkId(chunkId2) {} uint32_t logicPoolId; uint32_t copysetId; uint64_t chunkId; @@ -58,40 +58,45 @@ class ChunkServerClient { public: virtual ~ChunkServerClient() = default; /** - * @brief 初始化channel,对一个地址,初始化一次就好 - * @param csAddr chunkserver地址 - * @return 成功返回0,失败返回-1 - */ + * @brief initializes the channel. For an address, just initialize it once + * @param csAddr chunkserver address + * @return returns 0 for success, -1 for failure + */ virtual int Init(const std::string& csAddr); /** - * @brief 调用braft的RaftStat接口获取复制组的详细信息,放到iobuf里面 - * @param iobuf 复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief Invoke the RaftStat interface of braft to retrieve detailed + * information about the replication group and store it in the 'iobuf'. + * @param iobuf: Replication group details; valid when the return value is + * 0. + * @return 0 on success, -1 on failure. + */ virtual int GetRaftStatus(butil::IOBuf* iobuf); /** - * @brief 检查chunkserver是否在线,只检查controller,不检查response - * @return 在线返回true,不在线返回false - */ + * @brief Check if the chunkserver is online, only check the controller, not + * the response. + * @return true if online, false if offline. + */ virtual bool CheckChunkServerOnline(); /** - * @brief 调用chunkserver的GetCopysetStatus接口 - & @param request 查询copyset的request - * @param response 返回的response,里面有复制组详细信息,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief Invoke the GetCopysetStatus interface of the chunkserver. + * @param request: The request to query the copyset. + * @param[out] response: The response containing detailed information about + * the replication group; valid when the return value is 0. + * @return 0 on success, -1 on failure. + */ virtual int GetCopysetStatus(const CopysetStatusRequest& request, CopysetStatusResponse* response); /** - * @brief 从chunkserver获取chunk的hash值 - & @param chunk 要查询的chunk - * @param[out] chunkHash chunk的hash值,返回值为0时有效 - * @return 成功返回0,失败返回-1 - */ + * @brief Obtain the hash value of a chunk from the chunkserver. + * @param chunk: The chunk to be queried. + * @param[out] chunkHash: The hash value of the chunk; valid when the return + * value is 0. + * @return 0 on success, -1 on failure. 
+ */ virtual int GetChunkHash(const Chunk& chunk, std::string* chunkHash); private: diff --git a/src/tools/chunkserver_tool_factory.h b/src/tools/chunkserver_tool_factory.h index 9a1e344b3c..a1f71c33c6 100644 --- a/src/tools/chunkserver_tool_factory.h +++ b/src/tools/chunkserver_tool_factory.h @@ -26,9 +26,9 @@ #include #include +#include "src/fs/ext4_filesystem_impl.h" #include "src/tools/curve_meta_tool.h" #include "src/tools/raft_log_tool.h" -#include "src/fs/ext4_filesystem_impl.h" namespace curve { namespace tool { @@ -38,20 +38,21 @@ using curve::fs::Ext4FileSystemImpl; class ChunkServerToolFactory { public: /** - * @brief 根据输入的command获取CurveTool对象 - * @param command 要执行的命令的名称 - * @return CurveTool实例 + * @brief Retrieve the CurveTool object based on the input command + * @param command: The name of the command to be executed + * @return CurveTool instance */ static std::shared_ptr GenerateChunkServerTool( - const std::string& command); + const std::string& command); + private: /** - * @brief 获取CurveMetaTool实例 + * @brief Get CurveMetaTool instance */ static std::shared_ptr GenerateCurveMetaTool(); /** - * @brief 获取RaftLogMetaTool实例 + * @brief Get RaftLogMetaTool instance */ static std::shared_ptr GenerateRaftLogTool(); }; diff --git a/test/chunkserver/braft_cli_service2_test.cpp b/test/chunkserver/braft_cli_service2_test.cpp index cc97980aa2..1e1ca66d8c 100644 --- a/test/chunkserver/braft_cli_service2_test.cpp +++ b/test/chunkserver/braft_cli_service2_test.cpp @@ -20,25 +20,26 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service2.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service2.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" -#include "src/common/uuid.h" #include "src/common/timeutility.h" +#include "src/common/uuid.h" #include "src/fs/local_filesystem.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -48,10 +49,12 @@ using curve::common::UUIDGenerator; class BraftCliService2Test : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { peer1.set_address("127.0.0.1:9310:0"); @@ -75,10 +78,10 @@ class BraftCliService2Test : public testing::Test { } public: - const char *ip = "127.0.0.1"; - int port = 9310; - const char *confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; - int snapshotInterval = 3600; // 防止自动打快照 + const char* ip = "127.0.0.1"; + int port = 9310; + const char* confs = "127.0.0.1:9310:0,127.0.0.1:9311:0,127.0.0.1:9312:0"; + int snapshotInterval = 3600; // Prevent automatic snapshot taking int electionTimeoutMs = 3000; pid_t pid1; @@ -128,12 +131,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dirMap[peer1.address()]; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, 
copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -143,12 +142,8 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dirMap[peer2.address()]; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -158,16 +153,12 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dirMap[peer3.address()]; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ WaitpidGuard waitpidGuard(pid1, pid2, pid3); ::usleep(1.2 * 1000 * electionTimeoutMs); @@ -182,15 +173,15 @@ TEST_F(BraftCliService2Test, basic2) { options.timeout_ms = 3000; options.max_retry = 3; - /* add peer - 非法copyset */ + /* Add peer - illegal copyset */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); AddPeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -210,10 +201,10 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法peerid */ + /* Add peer - illegal peer id */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -223,7 +214,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_addpeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); AddPeerResponse2 response; @@ -237,13 +228,14 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* Add peer - sent to peers who are not leaders */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 + // Find a peer that is not a leader and send the configuration change + // request to it for processing if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; @@ -274,15 +266,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法copyset */ + /* Remove peer - illegal copyset */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); RemovePeerRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -302,10 
+294,10 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法peer id */ + /* Remove peer - illegal peer id */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -315,7 +307,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_removepeer(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); RemovePeerResponse2 response; @@ -329,15 +321,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "remove peer: " << cntl.ErrorText(); } - /* remove peer - 发送给不是leader的peer */ + /* Remove peer - sent to peers who are not leaders */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); PeerId leaderId; LOG(INFO) << "true leader is: " << gLeader.address(); - // 找一个不是leader的peer,然后将配置变更请求发送给它处理 - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader and send the configuration change + // request to it for processing + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { leaderId.parse(peer2.address()); *leaderPeer = peer2; } else { @@ -367,15 +359,15 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法copyset */ + /* Transfer leader - illegal copyset */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); TransferLeaderRequest2 request; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_leader(leaderPeer); @@ -395,8 +387,8 @@ TEST_F(BraftCliService2Test, basic2) { } /* transfer leader to leader */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -417,10 +409,10 @@ TEST_F(BraftCliService2Test, basic2) { stub.TransferLeader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法peer */ + /* Transfer leader - illegal peer */ { - Peer *leaderPeer = new Peer(); - Peer *peer = new Peer(); + Peer* leaderPeer = new Peer(); + Peer* peer = new Peer(); brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); @@ -430,7 +422,7 @@ TEST_F(BraftCliService2Test, basic2) { request.set_allocated_leader(leaderPeer); *leaderPeer = gLeader; request.set_allocated_transferee(peer); - // request中的peer id是非法的 + // The peer id in the request is illegal peer->set_address("127.0.0"); TransferLeaderResponse2 response; @@ -444,18 +436,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "Transfer leader peer: " << cntl.ErrorText(); } - /* get leader - 非法copyset */ + /* Get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderAddr, NULL)); - GetLeaderRequest2 request; GetLeaderResponse2 response; brpc::Controller cntl; - // 设置一个不存在的logicPoolId + // Set a non-existent logicPoolId 
request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); @@ -467,14 +458,13 @@ TEST_F(BraftCliService2Test, basic2) { /* remove peer then add peer */ { // 1 remove peer - Peer *removePeer = new Peer(); - Peer *leaderPeer1 = new Peer(); - Peer *leaderPeer2 = new Peer(); - Peer *addPeer = new Peer(); + Peer* removePeer = new Peer(); + Peer* leaderPeer1 = new Peer(); + Peer* leaderPeer2 = new Peer(); + Peer* addPeer = new Peer(); PeerId removePeerId; - // 找一个不是leader的peer,作为remove peer - if (0 - == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { + // Find a peer that is not a leader as a remove peer + if (0 == strcmp(gLeader.address().c_str(), peer1.address().c_str())) { removePeerId.parse(peer2.address()); *removePeer = peer2; } else { @@ -508,7 +498,6 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl1.Failed()); ASSERT_EQ(0, cntl1.ErrorCode()); - // add peer AddPeerRequest2 request2; request2.set_logicpoolid(logicPoolId); @@ -529,17 +518,17 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_FALSE(cntl2.Failed()); ASSERT_EQ(0, cntl2.ErrorCode()); } - /* snapshot - 非法copyset */ + /* Snapshot - illegal copyset */ { PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); SnapshotRequest2 request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); *peerPtr = peer1; request.set_allocated_peer(peerPtr); @@ -557,11 +546,12 @@ TEST_F(BraftCliService2Test, basic2) { } /* snapshot - normal */ { - // 初始状态快照不为空 + // The initial state snapshot is not empty std::string copysetDataDir = dirMap[gLeader.address()] + "/" + - ToGroupId(logicPoolId, copysetId) + "/" + RAFT_LOG_DIR; + ToGroupId(logicPoolId, copysetId) + "/" + + RAFT_LOG_DIR; std::shared_ptr fs( - LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); + LocalFsFactory::CreateFs(curve::fs::FileSystemType::EXT4, "")); std::vector files; fs->List(copysetDataDir.c_str(), &files); ASSERT_GE(files.size(), 1); @@ -574,7 +564,7 @@ TEST_F(BraftCliService2Test, basic2) { SnapshotRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(); + Peer* peerPtr = new Peer(); peerPtr->set_address(leaderId.to_string()); request.set_allocated_peer(peerPtr); @@ -586,19 +576,20 @@ TEST_F(BraftCliService2Test, basic2) { LOG(INFO) << "Start do snapshot"; CliService2_Stub stub(&channel); stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " - << cntl.ErrorText(); - // 需要连续打两次快照才能删除第一次快照时的log + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); + // Two consecutive snapshots are required to delete the log from the + // first snapshot sleep(5); cntl.Reset(); LOG(INFO) << "Start do snapshot"; stub.Snapshot(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot fail, error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot fail, error: " << cntl.ErrorText(); for (int i = 0; i < 60; ++i) { files.clear(); fs->List(copysetDataDir.c_str(), &files); - // 打完快照应该只剩下meta信息 + // After taking the snapshot, only meta information should be left if (files.size() == 1) { break; } @@ -619,18 +610,18 @@ TEST_F(BraftCliService2Test, basic2) { CliService2_Stub stub(&channel); stub.SnapshotAll(&cntl, &request, &response, NULL); - ASSERT_FALSE(cntl.Failed()) << "Do snapshot all fail, 
error: " - << cntl.ErrorText(); + ASSERT_FALSE(cntl.Failed()) + << "Do snapshot all fail, error: " << cntl.ErrorText(); } - /* reset peer - 非法 copyset */ + /* Reset peer - illegal copyset */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; ASSERT_EQ(0, channel.Init(peer.addr, NULL)); ResetPeerRequest2 request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_allocated_requestpeer(targetPeer); @@ -646,9 +637,9 @@ TEST_F(BraftCliService2Test, basic2) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* reset peer - new peer为空 */ + /* Reset peer - new peer is empty */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; @@ -669,7 +660,7 @@ TEST_F(BraftCliService2Test, basic2) { } /* reset peer - normal */ { - Peer *targetPeer = new Peer(); + Peer* targetPeer = new Peer(); *targetPeer = peer1; PeerId peer(peer1.address()); brpc::Channel channel; diff --git a/test/chunkserver/braft_cli_service_test.cpp b/test/chunkserver/braft_cli_service_test.cpp index 50f04588af..29d65a9af1 100644 --- a/test/chunkserver/braft_cli_service_test.cpp +++ b/test/chunkserver/braft_cli_service_test.cpp @@ -20,21 +20,22 @@ * Author: wudemiao */ -#include -#include -#include -#include +#include "src/chunkserver/braft_cli_service.h" + #include #include #include +#include +#include +#include +#include #include +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/braft_cli_service.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" #include "test/chunkserver/chunkserver_test_util.h" namespace curve { @@ -43,10 +44,12 @@ namespace chunkserver { class BraftCliServiceTest : public testing::Test { protected: static void SetUpTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "SetUpTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "SetUpTestCase"; } static void TearDownTestCase() { - LOG(INFO) << "BraftCliServiceTest " << "TearDownTestCase"; + LOG(INFO) << "BraftCliServiceTest " + << "TearDownTestCase"; } virtual void SetUp() { Exec("mkdir 6"); @@ -68,9 +71,9 @@ class BraftCliServiceTest : public testing::Test { butil::AtExitManager atExitManager; TEST_F(BraftCliServiceTest, basic) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9015; - const char *confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; + const char* confs = "127.0.0.1:9015:0,127.0.0.1:9016:0,127.0.0.1:9017:0"; int snapshotInterval = 600; PeerId peer1("127.0.0.1:9015:0"); PeerId peer2("127.0.0.1:9016:0"); @@ -87,12 +90,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 1 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid1) { - const char *copysetdir = "local://./6"; - StartChunkserver(ip, - port + 0, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./6"; + StartChunkserver(ip, port + 0, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -102,12 +101,8 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 2 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid2) { - const char *copysetdir = "local://./7"; - StartChunkserver(ip, - port + 1, - copysetdir, - confs, - snapshotInterval, + const 
char* copysetdir = "local://./7"; + StartChunkserver(ip, port + 1, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } @@ -117,17 +112,13 @@ TEST_F(BraftCliServiceTest, basic) { std::cerr << "fork chunkserver 3 failed" << std::endl; ASSERT_TRUE(false); } else if (0 == pid3) { - const char *copysetdir = "local://./8"; - StartChunkserver(ip, - port + 2, - copysetdir, - confs, - snapshotInterval, + const char* copysetdir = "local://./8"; + StartChunkserver(ip, port + 2, copysetdir, confs, snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -144,6 +135,7 @@ TEST_F(BraftCliServiceTest, basic) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -166,7 +158,7 @@ TEST_F(BraftCliServiceTest, basic) { options.timeout_ms = 1500; options.max_retry = 3; - /* add peer - 非法 copyset */ + /* Add peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -188,7 +180,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* add peer - 非法 peerid */ + /* add peer - illegal peerid */ { PeerId leaderId = leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leaderId); @@ -210,12 +202,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_EQ(EINVAL, cntl.ErrorCode()); LOG(INFO) << "add peer: " << cntl.ErrorText(); } - /* add peer - 发送给不是leader的peer */ + /* add peer - sent to peers who are not leader */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -240,13 +232,13 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* remove peer - 非法 copyset */ + /* remove peer - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; ASSERT_EQ(0, channel.Init(leaderId.addr, NULL)); RemovePeerRequest request; - /* 非法 copyset */ + /* Illegal copyset */ request.set_logicpoolid(logicPoolId + 1); request.set_copysetid(copysetId); request.set_leader_id(leaderId.to_string()); @@ -261,7 +253,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(ENOENT, cntl.ErrorCode()); } - /* remove peer - 非法 peer id */ + /* remove peer - illegal peer id */ { PeerId leaderId = leader; brpc::Channel channel; @@ -281,12 +273,12 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* remove peer - 发送给不是 leader 的 peer */ + /* remove peer - sent to peers who are not leaders */ { PeerId leaderId; LOG(INFO) << "true leader is: " << leader.to_string(); - if (0 - == strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { + if (0 == + strcmp(leader.to_string().c_str(), peer1.to_string().c_str())) { leaderId = peer2; } else { leaderId = peer1; @@ -309,7 +301,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EPERM, cntl.ErrorCode()); } - /* transfer leader - 非法 copyset */ + /* transfer leader - illegal copyset */ { PeerId leaderId = leader; brpc::Channel channel; @@ -346,7 +338,7 @@ TEST_F(BraftCliServiceTest, basic) { stub.transfer_leader(&cntl, &request, &response, NULL); ASSERT_FALSE(cntl.Failed()); } - /* transfer leader - 非法 peer */ + /* transfer leader 
- illegal peer */ { PeerId leaderId = leader; brpc::Channel channel; @@ -365,7 +357,7 @@ TEST_F(BraftCliServiceTest, basic) { ASSERT_TRUE(cntl.Failed()); ASSERT_EQ(EINVAL, cntl.ErrorCode()); } - /* get leader - 非法 copyset */ + /* get leader - illegal copyset */ { PeerId leaderId = leaderId; brpc::Channel channel; diff --git a/test/chunkserver/chunk_service_test.cpp b/test/chunkserver/chunk_service_test.cpp index 3968766d91..b62f02f9c8 100644 --- a/test/chunkserver/chunk_service_test.cpp +++ b/test/chunkserver/chunk_service_test.cpp @@ -20,24 +20,24 @@ * Author: wudemiao */ +#include "src/chunkserver/chunk_service.h" -#include -#include -#include -#include -#include #include #include #include +#include +#include +#include +#include +#include #include "include/chunkserver/chunkserver_common.h" +#include "proto/copyset.pb.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -75,11 +75,10 @@ class ChunkserverTest : public testing::Test { butil::AtExitManager atExitManager; - TEST_F(ChunkserverTest, normal_read_write_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9020; - const char *confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; + const char* confs = "127.0.0.1:9020:0,127.0.0.1:9021:0,127.0.0.1:9022:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -96,12 +95,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -111,12 +106,8 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -126,16 +117,12 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -152,6 +139,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -313,7 +301,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS, response.status()); } - /* delete 一个不存在的 chunk(重复删除) */ + /* Delete a non-existent chunk (duplicate deletion) */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -329,7 +317,7 @@ TEST_F(ChunkserverTest, normal_read_write_test) { 
        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
                  response.status());
    }
-    /* Read 一个不存在的 Chunk */
+    /* Read a non-existent Chunk */
    {
        brpc::Controller cntl;
        cntl.set_timeout_ms(rpcTimeoutMs);
@@ -347,7 +335,7 @@
        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST,
                  response.status());
    }
-    /* Applied index Read 一个不存在的 Chunk */
+    /* Applied index Read a non-existent Chunk */
    {
        brpc::Controller cntl;
        cntl.set_timeout_ms(rpcTimeoutMs);
@@ -416,9 +404,7 @@
        request.set_correctedsn(sn);
-        stub.DeleteChunkSnapshotOrCorrectSn(&cntl,
-                                            &request,
-                                            &response,
+        stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response,
                                            nullptr);
        ASSERT_FALSE(cntl.Failed());
        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
@@ -435,9 +421,7 @@
        request.set_correctedsn(sn);
-        stub.DeleteChunkSnapshotOrCorrectSn(&cntl,
-                                            &request,
-                                            &response,
+        stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response,
                                            nullptr);
        ASSERT_FALSE(cntl.Failed());
        ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN,
@@ -467,7 +451,7 @@
        ASSERT_EQ(0, channel.Init(leader.addr, NULL));
        ChunkService_Stub stub(&channel);
-        // get hash : 访问不存在的chunk
+        // Get hash: Access a non-existent chunk
        {
            brpc::Controller cntl;
            cntl.set_timeout_ms(rpcTimeoutMs);
@@ -485,7 +469,7 @@
            ASSERT_STREQ("0", response.hash().c_str());
        }
-        // get hash : 非法的offset和length
+        // Get hash: illegal offset and length
        {
            brpc::Controller cntl;
            cntl.set_timeout_ms(rpcTimeoutMs);
@@ -560,7 +544,7 @@
            ASSERT_EQ(1, response.chunksn().size());
        }
-        // get hash : 访问存在的chunk
+        // Get hash: Access an existing chunk
        {
            brpc::Controller cntl;
            cntl.set_timeout_ms(rpcTimeoutMs);
@@ -579,7 +563,7 @@
        }
    }
-    /* 多 chunk read/write/delete */
+    /* Multi-chunk read/write/delete */
    {
        brpc::Channel channel;
        if (channel.Init(leader.addr, NULL) != 0) {
@@ -685,7 +669,7 @@
            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
                      response.status());
        }
-        /* delete 一个不存在的 chunk(重复删除) */
+        /* Delete a non-existent chunk (duplicate deletion) */
        {
            brpc::Controller cntl;
            cntl.set_timeout_ms(rpcTimeoutMs);
@@ -703,7 +687,7 @@
            }
        }
    }
-    /* read 一个不存在的 chunk */
+    /* Read a non-existent chunk */
    {
        brpc::Channel channel;
        uint32_t requestSize = kOpRequestAlignSize;
@@ -770,7 +754,7 @@
            ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS,
                      response.status());
        }
-        /* read 一个不存在的 chunk */
+        /* Read a non-existent chunk */
        {
            brpc::Controller cntl;
            cntl.set_timeout_ms(rpcTimeoutMs);
diff --git a/test/chunkserver/chunk_service_test2.cpp b/test/chunkserver/chunk_service_test2.cpp
index 674220d91a..ef7ecf2ebd 100644
--- a/test/chunkserver/chunk_service_test2.cpp
+++ b/test/chunkserver/chunk_service_test2.cpp
@@ -20,24 +20,23 @@
 * Author: wudemiao
 */

-
-#include
-#include
-#include
-#include
-#include
#include
#include
#include
+#include
+#include
+#include
+#include
+#include

#include "include/chunkserver/chunkserver_common.h"
+#include "proto/copyset.pb.h"
+#include
"src/chunkserver/chunk_service.h" +#include "src/chunkserver/cli.h" #include "src/chunkserver/copyset_node.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/cli.h" -#include "proto/copyset.pb.h" -#include "test/chunkserver/chunkserver_test_util.h" #include "src/common/uuid.h" -#include "src/chunkserver/chunk_service.h" +#include "test/chunkserver/chunkserver_test_util.h" namespace curve { namespace chunkserver { @@ -76,9 +75,9 @@ class ChunkService2Test : public testing::Test { butil::AtExitManager atExitManager; TEST_F(ChunkService2Test, illegial_parameters_test) { - const char *ip = "127.0.0.1"; + const char* ip = "127.0.0.1"; int port = 9023; - const char *confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; + const char* confs = "127.0.0.1:9023:0,127.0.0.1:9024:0,127.0.0.1:9025:0"; int rpcTimeoutMs = 3000; int snapshotInterval = 600; @@ -95,12 +94,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid1) { std::string copysetdir = "local://./" + dir1; - StartChunkserver(ip, - port + 0, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 0, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -110,12 +105,8 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid2) { std::string copysetdir = "local://./" + dir2; - StartChunkserver(ip, - port + 1, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 1, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } @@ -125,16 +116,12 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_TRUE(false); } else if (0 == pid3) { std::string copysetdir = "local://./" + dir3; - StartChunkserver(ip, - port + 2, - copysetdir.c_str(), - confs, - snapshotInterval, - electionTimeoutMs); + StartChunkserver(ip, port + 2, copysetdir.c_str(), confs, + snapshotInterval, electionTimeoutMs); return; } - /* 保证进程一定会退出 */ + /* Ensure that the process will definitely exit */ class WaitpidGuard { public: WaitpidGuard(pid_t pid1, pid_t pid2, pid_t pid3) { @@ -151,6 +138,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { kill(pid3_, SIGINT); waitpid(pid3_, &waitState, 0); } + private: pid_t pid1_; pid_t pid2_; @@ -177,13 +165,13 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { LOG_IF(INFO, status.ok()) << "leader id: " << leader.to_string(); ASSERT_TRUE(status.ok()); - /* 非法参数 request 测试 */ + /* Illegal parameter request test */ brpc::Channel channel; if (channel.Init(leader.addr, NULL) != 0) { LOG(ERROR) << "Fail to init channel to " << leader; } ChunkService_Stub stub(&channel); - /* read 溢出 */ + /* Read overflow */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -201,7 +189,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read offset没对齐 */ + /* Read offset not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -219,7 +207,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read size没对齐 */ + /* Read size not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -237,7 +225,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read copyset 不存在 
*/ + /* Read copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -256,7 +244,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* read snapshot 溢出 */ + /* Read snapshot overflow */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -274,7 +262,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot offset没对齐 */ + /* Read snapshot offset not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -293,7 +281,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot size没对齐 */ + /* Read snapshot size not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -312,7 +300,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* read snapshot copyset 不存在 */ + /* Read snapshot copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -331,7 +319,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* write 溢出 */ + /* Write overflow */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -350,7 +338,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write offset没对齐 */ + /* Write offset not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -369,7 +357,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write size没对齐 */ + /* Write size not aligned */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -388,7 +376,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_INVALID_REQUEST, response.status()); } - /* write copyset 不存在 */ + /* The write copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -407,7 +395,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete copyset 不存在*/ + /* Delete copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -423,7 +411,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* delete snapshot copyset 不存在*/ + /* Delete snapshot copyset does not exist */ { brpc::Controller cntl; cntl.set_timeout_ms(rpcTimeoutMs); @@ -434,9 +422,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { request.set_copysetid(copysetId + 1); request.set_chunkid(chunkId); request.set_correctedsn(sn); - stub.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + stub.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, @@ -456,7 +442,7 @@ TEST_F(ChunkService2Test, illegial_parameters_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, response.status()); } - /* 不是 leader */ + /* Not a leader */ { PeerId peer1; PeerId peer2; @@ -562,13 +548,12 
@@ TEST_F(ChunkService2Test, illegial_parameters_test) { class ChunkServiceTestClosure : public ::google::protobuf::Closure { public:
- explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - }
+ explicit ChunkServiceTestClosure(int sleepUs = 0) : sleep_(sleepUs) {}
virtual ~ChunkServiceTestClosure() = default; void Run() override { if (0 != sleep_) {
- // 睡眠一会方面测试,overload
+ // Sleep for a while to facilitate the overload test
::usleep(sleep_); LOG(INFO) << "return rpc"; }
@@ -580,13 +565,12 @@ class ChunkServiceTestClosure : public ::google::protobuf::Closure { class UpdateEpochTestClosure : public ::google::protobuf::Closure { public:
- explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) { - }
+ explicit UpdateEpochTestClosure(int sleepUs = 0) : sleep_(sleepUs) {}
virtual ~UpdateEpochTestClosure() = default; void Run() override { if (0 != sleep_) {
- // 睡眠一会方面测试,overload
+ // Sleep for a while to facilitate the overload test
::usleep(sleep_); LOG(INFO) << "return rpc"; }
@@ -602,12 +586,12 @@ TEST_F(ChunkService2Test, overload_test) { // inflight throttle uint64_t maxInflight = 0;
- std::shared_ptr inflightThrottle - = std::make_shared(maxInflight);
+ std::shared_ptr inflightThrottle = + std::make_shared(maxInflight);
CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service
- CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance();
+ CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance();
ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle;
@@ -690,9 +674,7 @@ TEST_F(ChunkService2Test, overload_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId);
- chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done);
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); }
@@ -750,12 +732,12 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { // inflight throttle uint64_t maxInflight = 10;
- std::shared_ptr inflightThrottle - = std::make_shared(maxInflight);
+ std::shared_ptr inflightThrottle = + std::make_shared(maxInflight);
CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service
- CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance();
+ CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance();
ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle;
@@ -780,17 +762,17 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { }; std::vector threads;
- // 启动10个线程,将chunkserver压满
+ // Start 10 threads to fully load the chunkserver
for (int i = 0; i < 10; ++i) { std::thread t1(writeFunc); threads.push_back(std::move(t1)); }
- // 等待进程启动起来
+ // Wait for the threads to start
::usleep(500 * 1000); ASSERT_FALSE(inflightThrottle->IsOverLoad());
- // 压满之后chunkserver后面收到的request都会被拒绝
+ // Once the chunkserver is saturated, subsequent requests will be rejected
// write chunk { brpc::Controller cntl;
@@ -863,9 +845,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId);
- chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done); 
ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); }
@@ -916,7 +896,8 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); }
- // 等待request处理完成,之后chunkserver又重新可以接收新的request
+ // Wait for the request processing to complete, and then chunkserver can + // receive new requests again
for (auto it = threads.begin(); it != threads.end(); ++it) { it->join(); }
@@ -995,9 +976,7 @@ TEST_F(ChunkService2Test, overload_concurrency_test) { request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); request.set_chunkid(chunkId);
- chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, - &request, - &response, + chunkService.DeleteChunkSnapshotOrCorrectSn(&cntl, &request, &response, &done);
ASSERT_NE(CHUNK_OP_STATUS::CHUNK_OP_STATUS_OVERLOAD, response.status()); }
@@ -1055,12 +1034,12 @@ TEST_F(ChunkService2Test, CheckEpochTest) { // inflight throttle uint64_t maxInflight = 10000;
- std::shared_ptr inflightThrottle - = std::make_shared(maxInflight);
+ std::shared_ptr inflightThrottle = + std::make_shared(maxInflight);
CHECK(nullptr != inflightThrottle) << "new inflight throttle failed"; // chunk service
- CopysetNodeManager &nodeManager = CopysetNodeManager::GetInstance();
+ CopysetNodeManager& nodeManager = CopysetNodeManager::GetInstance();
ChunkServiceOptions chunkServiceOptions; chunkServiceOptions.copysetNodeManager = &nodeManager; chunkServiceOptions.inflightThrottle = inflightThrottle;
@@ -1083,7 +1062,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_chunkid(chunkId); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); }
// write chunk request have epoch, but epoch map have no epoch
@@ -1100,7 +1079,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); }
// update epoch map to {(1, 1) , (2, 2)} {
@@ -1130,7 +1109,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_COPYSET_NOTEXIST, - response.status()); + response.status()); }
// write chunk check epoch failed {
@@ -1146,7 +1125,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); }
// update epoch map to {(1, 2) , (2, 2)}
@@ -1174,7 +1153,7 @@ TEST_F(ChunkService2Test, CheckEpochTest) { request.set_epoch(1); chunkService.WriteChunk(&cntl, &request, &response, &done); ASSERT_EQ(CHUNK_OP_STATUS::CHUNK_OP_STATUS_EPOCH_TOO_OLD, - response.status()); + response.status()); } }
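The overload and CheckEpoch cases above all hinge on the chunkserver's inflight throttle: each request in flight bumps a counter, and once the counter reaches the configured maximum, further requests are rejected with CHUNK_OP_STATUS_OVERLOAD until some in-flight requests complete. A minimal sketch of that mechanism follows; the names here are hypothetical, and the production type is the InflightThrottle wired into ChunkServiceOptions:

#include <atomic>
#include <cstdint>

// Illustration only, not the Curve implementation.
class SimpleInflightThrottle {
 public:
    explicit SimpleInflightThrottle(uint64_t maxInflight)
        : maxInflight_(maxInflight) {}

    // Checked by the service before it admits a request.
    bool IsOverLoad() const {
        return inflight_.load(std::memory_order_relaxed) >= maxInflight_;
    }

    // Called when a request starts / finishes.
    void Increment() { inflight_.fetch_add(1, std::memory_order_relaxed); }
    void Decrement() { inflight_.fetch_sub(1, std::memory_order_relaxed); }

 private:
    const uint64_t maxInflight_;
    std::atomic<uint64_t> inflight_{0};
};

With maxInflight = 0 (overload_test) every request is rejected immediately; with maxInflight = 10 (overload_concurrency_test) the ten slow writer threads keep the counter saturated, so follow-up RPCs observe CHUNK_OP_STATUS_OVERLOAD until the writers are joined.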
diff --git a/test/chunkserver/chunkserver_helper_test.cpp b/test/chunkserver/chunkserver_helper_test.cpp index e9d538bf0c..d401a22185 100644
--- a/test/chunkserver/chunkserver_helper_test.cpp
+++ b/test/chunkserver/chunkserver_helper_test.cpp
@@ -20,14 +20,16 @@ * Author: lixiaocui */
-#include #include "src/chunkserver/chunkserver_helper.h" + + +#include + + #include "src/chunkserver/register.h"
namespace curve { namespace chunkserver { TEST(ChunkServerMeta, test_encode_and_decode) {
- // 1. 正常编解码
+ // 1. Normal encoding and decoding
ChunkServerMetadata metadata; metadata.set_version(CURRENT_METADATA_VERSION); metadata.set_id(1);
@@ -43,13 +45,13 @@ TEST(ChunkServerMeta, test_encode_and_decode) { ASSERT_EQ(metadata.id(), metaOut.id()); ASSERT_EQ(metadata.token(), metaOut.token());
- // 2. 编码异常
+ // 2. Encoding failure
metadata.clear_token(); strOut.clear(); ASSERT_FALSE( ChunkServerMetaHelper::EncodeChunkServerMeta(metadata, &strOut));
- // 3. 解码异常
+ // 3. Decoding failure
metadata.set_token("hello"); metadata.set_checksum(9999); ASSERT_TRUE(
diff --git a/test/chunkserver/chunkserver_service_test.cpp b/test/chunkserver/chunkserver_service_test.cpp index 106501e267..4b834a5037 100644
--- a/test/chunkserver/chunkserver_service_test.cpp
+++ b/test/chunkserver/chunkserver_service_test.cpp
@@ -20,40 +20,41 @@ * Author: lixiaocui1 */
-#include -#include +#include "src/chunkserver/chunkserver_service.h" + #include +#include #include +#include #include
-#include "src/chunkserver/chunkserver_service.h"
-#include "test/chunkserver/mock_copyset_node_manager.h"
+ #include "proto/chunkserver.pb.h"
+#include "test/chunkserver/mock_copyset_node_manager.h"
namespace curve { namespace chunkserver {
-using ::testing::Return; using ::testing::_;
+using ::testing::_; using ::testing::Return;
TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) {
- // 启动ChunkServerService
+ // Start ChunkServerService
auto server = new brpc::Server(); MockCopysetNodeManager* copysetNodeManager = new MockCopysetNodeManager(); ChunkServerServiceImpl* chunkserverService = new ChunkServerServiceImpl(copysetNodeManager);
- ASSERT_EQ(0, - server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE));
+ ASSERT_EQ( + 0, server->AddService(chunkserverService, brpc::SERVER_OWNS_SERVICE));
ASSERT_EQ(0, server->Start("127.0.0.1", {5900, 5999}, nullptr)); auto listenAddr = butil::endpoint2str(server->listen_address()).c_str();
- brpc::Channel channel;
ASSERT_EQ(0, channel.Init(listenAddr, NULL)); ChunkServerService_Stub stub(&channel); ChunkServerStatusRequest request; ChunkServerStatusResponse response;
- // 1. 指定chunkserver加载copyset完成
+ // 1. The specified chunkserver has finished loading copysets
{ EXPECT_CALL(*copysetNodeManager, LoadFinished()) .WillOnce(Return(false));
@@ -63,23 +64,22 @@ TEST(ChunkServerServiceImplTest, test_ChunkServerStatus) { ASSERT_FALSE(response.copysetloadfin()); }
- // 2. 指定chunkserver加载copyset未完成
+ // 2. The specified chunkserver has not finished loading copysets
{
- EXPECT_CALL(*copysetNodeManager, LoadFinished()) - .WillOnce(Return(true));
+ EXPECT_CALL(*copysetNodeManager, LoadFinished()).WillOnce(Return(true));
brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr); ASSERT_FALSE(cntl.Failed()); ASSERT_TRUE(response.copysetloadfin()); }
- // 停止chunkserver service
+ // Stop chunkserver service
server->Stop(0); server->Join(); delete server; server = nullptr;
- // 3. 未获取到指定chunkserver加载copyset状态
+ // 3. Failed to obtain the copyset load status of the specified chunkserver
{ brpc::Controller cntl; stub.ChunkServerStatus(&cntl, &request, &response, nullptr);
diff --git a/test/common/bitmap_test.cpp b/test/common/bitmap_test.cpp index 8bb85b01ad..8e80e255df 100644
--- a/test/common/bitmap_test.cpp
+++ b/test/common/bitmap_test.cpp
@@ -20,10 +20,10 @@ * Author: yangyaokai */
-#include - #include "src/common/bitmap.h" +#include +
namespace curve { namespace common {
@@ -62,7 +62,7 @@ TEST(BitmapTEST, constructor_test) { delete[] mem; }
- // 测试拷贝构造
+ // Test copy construction
{ Bitmap bitmap1(32); Bitmap bitmap2(bitmap1);
@@ -72,7 +72,7 @@ } }
- // 测试赋值操作
+ // Test assignment operation
{ Bitmap bitmap1(32); Bitmap bitmap2(16);
@@ -88,7 +88,7 @@ TEST(BitmapTEST, constructor_test) { } }
- // 测试比较操作符
+ // Test comparison operator
{ Bitmap bitmap1(16); Bitmap bitmap2(16);
@@ -229,7 +229,7 @@ TEST(BitmapTEST, divide_test) { vector clearRanges; vector setRanges;
- // 所有位为0
+ // All bits are 0
{ bitmap.Clear(); bitmap.Divide(0, 31, &clearRanges, &setRanges);
@@ -241,7 +241,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 所有位为1
+ // All bits are 1
{ bitmap.Set(); bitmap.Divide(0, 31, &clearRanges, &setRanges);
@@ -253,7 +253,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 两个range,起始为clear range,末尾为set range
+ // Two ranges, starting with a clear range and ending with a set range
{ bitmap.Clear(0, 16); bitmap.Set(17, 31);
@@ -268,7 +268,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 两个range,起始为 set range,末尾为 clear range
+ // Two ranges, starting with set range and ending with clear range
{ bitmap.Set(0, 16); bitmap.Clear(17, 31);
@@ -283,7 +283,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 三个range,头尾为 set range,中间为 clear range
+ // Three ranges, with set range at the beginning and end, and clear range + // in the middle
{ bitmap.Set(0, 8); bitmap.Clear(9, 25);
@@ -301,7 +302,8 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 三个range,头尾为 clear range,中间为 set range
+ // Three ranges, with clear range at the beginning and end, and set range + // in the middle
{ bitmap.Clear(0, 8); bitmap.Set(9, 25);
@@ -319,7 +321,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 四个range,头为 clear range,末尾为 set range
+ // Four ranges, starting with a clear range and ending with a set range
{ bitmap.Clear(0, 7); bitmap.Set(8, 15);
@@ -340,7 +342,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 四个range,头为 set range,末尾为 clear range
+ // Four ranges, starting with set range and ending with clear range
{ bitmap.Set(0, 7); bitmap.Clear(8, 15);
@@ -361,7 +363,7 @@ TEST(BitmapTEST, divide_test) { setRanges.clear(); }
- // 复杂场景随机偏移测试
+ // Random offset testing for complex scenarios
{ bitmap.Set(0, 5); bitmap.Clear(6, 9);
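The divide_test cases above pin down the semantics of Bitmap::Divide: it scans the given bit range and partitions it into maximal runs of cleared and set bits, appending one range per run to the two output vectors. A hedged usage sketch of the "two ranges" case, assuming the range type is named BitRange and carries begin/end indexes as the assertions imply:

#include <vector>

#include "src/common/bitmap.h"

// Sketch only; mirrors the "two ranges" case exercised above.
void DivideExample() {
    using curve::common::Bitmap;
    using curve::common::BitRange;

    Bitmap bitmap(32);
    bitmap.Clear(0, 16);  // bits [0, 16] cleared
    bitmap.Set(17, 31);   // bits [17, 31] set

    std::vector<BitRange> clearRanges;
    std::vector<BitRange> setRanges;
    bitmap.Divide(0, 31, &clearRanges, &setRanges);
    // Expected: one clear range covering [0, 16] and one set range
    // covering [17, 31].
}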
diff --git a/test/common/channel_pool_test.cpp b/test/common/channel_pool_test.cpp index e327f6f82f..d573142cf0 100644
--- a/test/common/channel_pool_test.cpp
+++ b/test/common/channel_pool_test.cpp
@@ -20,30 +20,30 @@ * Author: charisu */
-#include - #include "src/common/channel_pool.h" +#include +
namespace curve { namespace common { TEST(Common, ChannelPool) { ChannelPool channelPool; ChannelPtr channelPtr;
- // 地址非法,init失败
+ // Illegal address, init failed
std::string addr = "127.0.0.1:80000"; ASSERT_EQ(-1, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_FALSE(channelPtr);
- // 地址合法,init成功
+ // The address is legal, init succeeded
addr = "127.0.0.1:8000"; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr)); ASSERT_TRUE(channelPtr);
- // 同一个地址应该返回同一个channelPtr
+ // The same address should return the same channelPtr
ChannelPtr channelPtr2; ASSERT_EQ(0, channelPool.GetOrInitChannel(addr, &channelPtr2)); ASSERT_TRUE(channelPtr2); ASSERT_EQ(channelPtr, channelPtr2);
- // 清空
+ // Clear
channelPool.Clear(); }
diff --git a/test/integration/chunkserver/chunkserver_basic_test.cpp b/test/integration/chunkserver/chunkserver_basic_test.cpp index a36bfedcee..6181e40bfa 100644
--- a/test/integration/chunkserver/chunkserver_basic_test.cpp
+++ b/test/integration/chunkserver/chunkserver_basic_test.cpp
@@ -24,8 +24,8 @@ #include #include -#include #include +#include
#include "test/chunkserver/datastore/filepool_helper.h"
#include "test/integration/common/chunkservice_op.h"
@@ -49,24 +49,23 @@ static constexpr uint32_t kOpRequestAlignSize = 4096; const char* kFakeMdsAddr = "127.0.0.1:9079";
-static const char *chunkServerParams[1][16] = { - { "chunkserver", "-chunkServerIp=127.0.0.1", - "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, - "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", - "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkserver.dat", - "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", - "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", - "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", - "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/chunkfilepool.meta", - "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", - "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT - "/walfilepool.meta", - "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", - "-raft_sync_segments=true", NULL },
+static const char* chunkServerParams[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", + "-chunkServerPort=" BASIC_TEST_CHUNK_SERVER_PORT, + "-chunkServerStoreUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/", + "-chunkServerMetaUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkserver.dat", + "-copySetUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftSnapshotUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-raftLogUri=curve://./" BASIC_TEST_CHUNK_SERVER_PORT "/copysets", + "-recycleUri=local://./" BASIC_TEST_CHUNK_SERVER_PORT "/recycler", + "-chunkFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkfilepool/", + "-chunkFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT + "/chunkfilepool.meta", + "-walFilePoolDir=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool/", + "-walFilePoolMetaPath=./" BASIC_TEST_CHUNK_SERVER_PORT "/walfilepool.meta", + "-conf=./" BASIC_TEST_CHUNK_SERVER_PORT "/chunkserver.conf", + "-raft_sync_segments=true", NULL},
};
butil::AtExitManager atExitManager;
@@ -107,7 +106,7 @@ class ChunkServerIoTest : public testing::Test { paramsIndexs_[PeerCluster::PeerToId(peer1_)] = 0; params_.push_back(const_cast(chunkServerParams[0]));
- // 初始化chunkfilepool,这里会预先分配一些chunk
+ // Initialize chunkfilepool, where some chunks will be pre-allocated
lfs_ = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); poolDir_ = "./" + std::to_string(PeerCluster::PeerToId(peer1_)) + "/chunkfilepool/";
@@ -125,11 +124,11 @@ class ChunkServerIoTest : public testing::Test { ::system(rmdir1.c_str());
- // 等待进程结束
+ // Wait for the processes to exit
::usleep(100 * 1000); }
- int 
InitCluster(PeerCluster *cluster) { + int InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs_); @@ -139,7 +138,7 @@ class ChunkServerIoTest : public testing::Test { return -1; } - // 等待leader产生 + // Waiting for the leader to be generated if (cluster->WaitLeader(&leaderPeer_)) { LOG(ERROR) << "WaiteLeader failed"; return -1; @@ -168,45 +167,50 @@ class ChunkServerIoTest : public testing::Test { std::string leader = ""; PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster)); - /* 场景一:新建的文件,Chunk文件不存在 */ + /* + * Scenario 1: Newly created file, Chunk file does not exist + */ ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, 0, length, nullptr)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - /* 场景二:通过WriteChunk产生chunk文件后操作 */ + /* + * Scenario 2: After generating a chunk file through WriteChunk, + * perform the operation + */ data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); + data.c_str(), &chunkData)); ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunkId, sn1, NULL_SN, leader)); - ASSERT_EQ(0, verify->VerifyReadChunk( - chunkId, sn1, 0, 4 * KB, &chunkData)); + ASSERT_EQ(0, + verify->VerifyReadChunk(chunkId, sn1, 0, 4 * KB, &chunkData)); ASSERT_EQ(0, verify->VerifyReadChunk(chunkId, sn1, kChunkSize - 4 * KB, - 4 * KB, nullptr)); + 4 * KB, nullptr)); data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 0, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 4 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); data.assign(length * 2, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunkId, sn1, 4 * KB, 8 * KB, - data.c_str(), &chunkData)); - ASSERT_EQ(0, - verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); + data.c_str(), &chunkData)); + ASSERT_EQ( + 0, verify->VerifyReadChunk(chunkId, sn1, 0, 12 * KB, &chunkData)); - /* 场景三:用户删除文件 */ + /* Scenario 3: User deletes files */ ASSERT_EQ(0, verify->VerifyDeleteChunk(chunkId, sn1)); - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunkId, NULL_SN, NULL_SN, leader)); + ASSERT_EQ( + 0, verify->VerifyGetChunkInfo(chunkId, NULL_SN, NULL_SN, leader)); } void TestSnapshotIO(std::shared_ptr verify) { @@ -217,150 +221,164 @@ class ChunkServerIoTest : public testing::Test { const SequenceNum sn3 = 3; int length = kOpRequestAlignSize; std::string data(length * 4, 0); - std::string chunkData1a(kChunkSize, 0); // chunk1版本1预期数据 - std::string chunkData1b(kChunkSize, 0); // chunk1版本2预期数据 - std::string chunkData1c(kChunkSize, 0); // chunk1版本3预期数据 - std::string chunkData2(kChunkSize, 0); // chunk2预期数据 + std::string chunkData1a(kChunkSize, + 0); // chunk1 version 1 expected data + std::string chunkData1b(kChunkSize, + 0); // 
chunk1 version 2 expected data std::string chunkData1c(kChunkSize, 0); // chunk1 version 3 expected data std::string chunkData2(kChunkSize, 0); // chunk2 expected data
std::string leader = "";
PeerCluster cluster("InitShutdown-cluster", logicPoolId_, copysetId_, - peers_, params_, paramsIndexs_); + peers_, params_, paramsIndexs_);
ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); ASSERT_EQ(0, InitCluster(&cluster));
- // 构造初始环境
- // 写chunk1产生chunk1,chunk1版本为1,chunk2开始不存在。
+ // Construct initial environment
+ // Writing to chunk1 creates chunk1 at version 1; chunk2 does not
+ // exist initially.
data.assign(length, 'a'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn1, 0, 12 * KB, - data.c_str(), &chunkData1a)); + data.c_str(), &chunkData1a));
/*
- * 场景一:第一次给文件打快照
- */
+ * Scenario 1: Taking a snapshot of a file for the first time
+ */
chunkData1b.assign(chunkData1a); // Simulate COW on chunk1 data
data.assign(length, 'b'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b));
- // 重复写入同一区域,用于验证不会重复cow
+ // Write repeatedly to the same area to verify that COW is not
+ // performed twice
data.assign(length, 'c'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b));
- // 读取chunk1快照,预期读到版本1数据
+ // Reading chunk1 snapshot, expected to read version 1 data
ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a));
- // chunk1写[0, 4KB]
+ // Chunk1 write [0, 4KB]
data.assign(length, 'd'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 0, 4 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b));
- // chunk1写[4KB, 16KB]
+ // Chunk1 write [4KB, 16KB]
data.assign(length, 'e'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn2, 4 * KB, 12 * KB, - data.c_str(), &chunkData1b)); + data.c_str(), &chunkData1b));
- // 获取chunk1信息,预期其版本为2,快照版本为1,
+ // Obtain chunk1 information, with expected version 2 and snapshot
+ // version 1
ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn2, sn1, leader));
- // chunk1读[0, 12KB], 预期读到版本2数据
+ // Chunk1 read [0, 12KB], expected to read version 2 data
- ASSERT_EQ(0, - verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b));
+ ASSERT_EQ( + 0, verify->VerifyReadChunk(chunk1, sn2, 0, 12 * KB, &chunkData1b));
- // 读取chunk1的快照, 预期读到版本1数据
+ // Reading snapshot of chunk1, expected to read version 1 data
ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn1, 0, 12 * KB, - &chunkData1a)); + &chunkData1a));
- // 读取chunk2的快照, 预期chunk不存在
+ // Reading snapshot of chunk2, expected chunk not to exist
- ASSERT_EQ(0, verify->VerifyReadChunkSnapshot( chunk2, sn1, 0, 12 * KB, nullptr));
+ ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn1, 0, 12 * KB, + nullptr));
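        // The writes above pin down the copy-on-write rule this scenario
        // exercises: the first write carrying a sequence number higher than
        // the chunk's current one copies the old payload into a snapshot
        // chunk before the write is applied, and later writes at the same
        // sequence number reuse that snapshot instead of copying again.
        // A hedged sketch of the rule (hypothetical types; the real logic
        // lives in the chunkserver datastore and works at page granularity):
        //
        //   struct FakeChunk {
        //       SequenceNum sn = 1;      // current version
        //       SequenceNum snapSn = 0;  // snapshot version (0 = none)
        //       std::string data, snapshot;
        //   };
        //
        //   void WriteWithCow(FakeChunk* c, SequenceNum writeSn,
        //                     const std::string& buf) {
        //       if (writeSn > c->sn) {      // first write after a snapshot
        //           c->snapshot = c->data;  // COW: preserve the old version
        //           c->snapSn = c->sn;
        //           c->sn = writeSn;
        //       }
        //       c->data = buf;              // same-sn writes skip the copy
        //   }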
verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn2)); - // chunk2写[0, 8KB] + // Chunk2 write [0, 8KB] data.assign(length, 'f'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn2, 0, 8 * KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为2,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); /* - * 场景三:第二次打快照 - */ - // chunk1写[0, 8KB] - chunkData1c.assign(chunkData1b); // 模拟对chunk1数据进行COW + * Scenario 3: Taking a second snapshot + */ + // Chunk1 write [0, 8KB] + chunkData1c.assign(chunkData1b); // Simulate COW on chunk1 data data.assign(length, 'g'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk1, sn3, 0, 8 * KB, - data.c_str(), &chunkData1c)); - // 获取chunk1信息,预期其版本为3,快照版本为2 + data.c_str(), &chunkData1c)); + // Obtain chunk1 information, expect its version to be 3 and snapshot + // version to be 2 ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk1, sn3, sn2, leader)); - // 读取chunk1的快照, 预期读到版本2数据 + // Reading snapshot of chunk1, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk1, sn2, 0, 12 * KB, - &chunkData1b)); + &chunkData1b)); - // 读取chunk2的快照, 预期读到版本2数据 + // Reading snapshot of chunk2, expected to read version 2 data ASSERT_EQ(0, verify->VerifyReadChunkSnapshot(chunk2, sn2, 0, 8 * KB, - &chunkData2)); + &chunkData2)); - // 删除chunk1文件,预期成功,本地快照存在的情况下,会将快照也一起删除 + // Delete chunk1 file, expected success. If the local snapshot exists, + // the snapshot will also be deleted together ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, sn3)); + verify->VerifyDeleteChunk(chunk1, sn3)); /* - * 场景四:第二次快照结束,删除快照 - */ - // 删除chunk1快照,因为chunk1及其快照上一步已经删除,预期成功 + * Scenario 4: The second snapshot ends and the snapshot is deleted + */ + // Delete chunk1 snapshot because chunk1 and its snapshot have been + // deleted in the previous step and are expected to succeed ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2快照,预期成功 + // Delete chunk2 snapshot, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); - // 获取chunk2信息,预期其版本为2,无快照版本 + verify->VerifyDeleteChunkSnapshotOrCorrectSn(chunk2, sn3)); + // Obtain chunk2 information, expect its version to be 2, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn2, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'h'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, - data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + data.c_str(), &chunkData2)); + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); - // chunk2写[0, 4KB] + // Chunk2 write [0, 4KB] data.assign(length, 'i'); ASSERT_EQ(0, verify->VerifyWriteChunk(chunk2, sn3, 0, 4 * KB, data.c_str(), &chunkData2)); - // 获取chunk2信息,预期其版本为3,无快照版本 + // Obtain chunk2 information, expect its version to be 3, no snapshot + // version ASSERT_EQ(0, 
verify->VerifyGetChunkInfo(chunk2, sn3, NULL_SN, leader)); /* - * 场景五:用户删除文件 - */ - // 删除chunk1,已不存在,预期成功 + * Scenario 5: User deletes files + */ + // Delete chunk1, it no longer exists, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk1, sn3)); - // 获取chunk1信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk1, NULL_SN, NULL_SN, leader)); - // 删除chunk2,预期成功 + verify->VerifyDeleteChunk(chunk1, sn3)); + // Obtaining chunk1 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk1, NULL_SN, NULL_SN, leader)); + // Delete chunk2, expected success ASSERT_EQ(CHUNK_OP_STATUS_SUCCESS, - verify->VerifyDeleteChunk(chunk2, sn3)); - // 获取chunk2信息,预期不存在 - ASSERT_EQ(0, verify->VerifyGetChunkInfo( - chunk2, NULL_SN, NULL_SN, leader)); + verify->VerifyDeleteChunk(chunk2, sn3)); + // Obtaining chunk2 information, expected not to exist + ASSERT_EQ(0, + verify->VerifyGetChunkInfo(chunk2, NULL_SN, NULL_SN, leader)); } public: @@ -370,7 +388,7 @@ class ChunkServerIoTest : public testing::Test { CopysetID copysetId_; std::map paramsIndexs_; - std::vector params_; + std::vector params_; std::string externalIp_; private: @@ -391,8 +409,8 @@ class ChunkServerIoTest : public testing::Test { * */ TEST_F(ChunkServerIoTest, BasicIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } @@ -401,15 +419,15 @@ TEST_F(ChunkServerIoTest, BasicIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestBasicIO(verify); } TEST_F(ChunkServerIoTest, SnapshotIO) { - struct ChunkServiceOpConf opConf = { &leaderPeer_, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&leaderPeer_, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } @@ -417,8 +435,8 @@ TEST_F(ChunkServerIoTest, SnapshotIO) { TEST_F(ChunkServerIoTest, SnapshotIO_from_external_ip) { Peer exPeer; exPeer.set_address(externalIp_ + ":" + BASIC_TEST_CHUNK_SERVER_PORT + ":0"); - struct ChunkServiceOpConf opConf = { &exPeer, logicPoolId_, copysetId_, - 2000 }; + struct ChunkServiceOpConf opConf = {&exPeer, logicPoolId_, copysetId_, + 2000}; auto verify = std::make_shared(&opConf); TestSnapshotIO(verify); } diff --git a/test/integration/chunkserver/chunkserver_clone_recover.cpp b/test/integration/chunkserver/chunkserver_clone_recover.cpp index 58ce282336..6b9da23b79 100644 --- a/test/integration/chunkserver/chunkserver_clone_recover.cpp +++ b/test/integration/chunkserver/chunkserver_clone_recover.cpp @@ -20,9 +20,9 @@ * Author: qinyi */ -#include -#include #include +#include +#include #include #include @@ -30,14 +30,14 @@ #include #include "include/client/libcurve.h" -#include "src/common/s3_adapter.h" -#include "src/common/timeutility.h" -#include "src/client/inflight_controller.h" #include "src/chunkserver/cli2.h" +#include "src/client/inflight_controller.h" #include "src/common/concurrent/count_down_event.h" -#include "test/integration/common/chunkservice_op.h" +#include "src/common/s3_adapter.h" +#include "src/common/timeutility.h" #include 
"test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" +#include "test/integration/common/chunkservice_op.h" #include "test/util/config_generator.h" using curve::CurveCluster; @@ -91,11 +91,11 @@ const uint32_t kChunkSize = 16 * 1024 * 1024; const uint32_t kChunkServerMaxIoSize = 64 * 1024; const std::vector mdsConf0{ - { "--confPath=" + MDS0_CONF_PATH }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME }, - { "--sessionInterSec=20" }, - { "--etcdAddr=" + ETCD_CLIENT_IP_PORT }, + {"--confPath=" + MDS0_CONF_PATH}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--mdsDbName=" + CSCLONE_TEST_MDS_DBNAME}, + {"--sessionInterSec=20"}, + {"--etcdAddr=" + ETCD_CLIENT_IP_PORT}, }; const std::vector mdsFileConf0{ @@ -129,73 +129,67 @@ const std::vector csCommonConf{ }; const std::vector chunkserverConf1{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER0_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER0_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER0_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER0_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER0_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER0_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER0_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER0_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf2{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { 
"-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER1_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER1_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER1_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER1_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER1_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER1_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER1_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER1_BASE_DIR + "/walfilepool.meta"}}; const std::vector chunkserverConf3{ - { "-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR }, - { "-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + - "/chunkserver.dat" }, - { "-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets" }, - { "-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler" }, - { "-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool" }, - { "-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + - "/chunkfilepool.meta" }, - { "-conf=" + CHUNKSERVER_CONF_PATH }, - { "-raft_sync_segments=true" }, - { "--log_dir=" + CSCLONE_BASE_DIR }, - { "--graceful_quit_on_sigterm" }, - { "-chunkServerIp=127.0.0.1" }, - { "-chunkServerPort=" + CHUNK_SERVER2_PORT }, - { "-enableChunkfilepool=false" }, - { "-enableWalfilepool=false" }, - { "-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool" }, - { "-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta" } -}; + {"-chunkServerStoreUri=local://" + CHUNKSERVER2_BASE_DIR}, + {"-chunkServerMetaUri=local://" + CHUNKSERVER2_BASE_DIR + + "/chunkserver.dat"}, + {"-copySetUri=local://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftSnapshotUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-raftLogUri=curve://" + CHUNKSERVER2_BASE_DIR + "/copysets"}, + {"-recycleUri=local://" + CHUNKSERVER2_BASE_DIR + "/recycler"}, + {"-chunkFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/filepool"}, + {"-chunkFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/chunkfilepool.meta"}, + {"-conf=" + CHUNKSERVER_CONF_PATH}, + {"-raft_sync_segments=true"}, + {"--log_dir=" + CSCLONE_BASE_DIR}, + {"--graceful_quit_on_sigterm"}, + {"-chunkServerIp=127.0.0.1"}, + {"-chunkServerPort=" + CHUNK_SERVER2_PORT}, + {"-enableChunkfilepool=false"}, + {"-enableWalfilepool=false"}, + {"-walFilePoolDir=" + CHUNKSERVER2_BASE_DIR + "/walfilepool"}, + {"-walFilePoolMetaPath=" + CHUNKSERVER2_BASE_DIR + "/walfilepool.meta"}}; namespace curve { namespace chunkserver { @@ -203,7 +197,9 @@ namespace chunkserver { class CSCloneRecoverTest : public 
::testing::Test { public: CSCloneRecoverTest() - : logicPoolId_(1), copysetId_(1), chunkData1_(kChunkSize, 'X'), + : logicPoolId_(1), + copysetId_(1), + chunkData1_(kChunkSize, 'X'), chunkData2_(kChunkSize, 'Y') {} void SetUp() { @@ -217,11 +213,11 @@ class CSCloneRecoverTest : public ::testing::Test { s3Conf); cluster_->PrepareConfig(MDS0_CONF_PATH, mdsFileConf0); - // 生成chunkserver配置文件 + // Generate chunkserver configuration file cluster_->PrepareConfig(CHUNKSERVER_CONF_PATH, csCommonConf); - // 1. 启动etcd + // 1. Start etcd LOG(INFO) << "begin to start etcd"; pid_t pid = cluster_->StartSingleEtcd( 1, ETCD_CLIENT_IP_PORT, ETCD_PEER_IP_PORT, @@ -231,19 +227,20 @@ class CSCloneRecoverTest : public ::testing::Test { ASSERT_GT(pid, 0); ASSERT_TRUE(cluster_->WaitForEtcdClusterAvalible(5)); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster_->StartSingleMDS(0, MDS0_IP_PORT, MDS0_DUMMY_PORT, - mdsConf0, true); + mdsConf0, true); LOG(INFO) << "mds 0 started on " + MDS0_IP_PORT + ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 生成topo.json + // Generate topo.json Json::Value topo; Json::Value servers; std::string chunkServerIpPort[] = {CHUNK_SERVER0_IP_PORT, - CHUNK_SERVER1_IP_PORT, - CHUNK_SERVER2_IP_PORT}; + CHUNK_SERVER1_IP_PORT, + CHUNK_SERVER2_IP_PORT}; for (int i = 0; i < 3; ++i) { Json::Value server; std::vector ipPort; @@ -278,7 +275,7 @@ class CSCloneRecoverTest : public ::testing::Test { topoConf << topo.toStyledString(); topoConf.close(); - // 3. 创建物理池 + // 3. Creating a physical pool string createPPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -291,13 +288,12 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createPPCmd: " << createPPCmd; ret = system(createPPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); - // 4. 创建chunkserver + // 4. Create chunkserver pid = cluster_->StartSingleChunkServer(1, CHUNK_SERVER0_IP_PORT, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " + CHUNK_SERVER0_IP_PORT + @@ -319,7 +315,8 @@ class CSCloneRecoverTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first string createLPCmd = string("./bazel-bin/tools/curvefsTool") + string(" -cluster_map=" + CSCLONE_BASE_DIR + "/topo.json") + @@ -331,27 +328,26 @@ class CSCloneRecoverTest : public ::testing::Test { while (retry < 5) { LOG(INFO) << "exec createLPCmd: " << createLPCmd; ret = system(createLPCmd.c_str()); - if (ret == 0) - break; + if (ret == 0) break; retry++; } ASSERT_EQ(ret, 0); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 获取chunkserver主节点 + // Obtain the chunkserver master node logicPoolId_ = 1; copysetId_ = 1; ASSERT_EQ(0, chunkSeverGetLeader()); - struct ChunkServiceOpConf conf0 = { &leaderPeer_, logicPoolId_, - copysetId_, 5000 }; + struct ChunkServiceOpConf conf0 = {&leaderPeer_, logicPoolId_, + copysetId_, 5000}; opConf_ = conf0; - // 6. 初始化client配置 + // 6. Initialize client configuration LOG(INFO) << "init globalclient"; ret = Init(clientConfPath.c_str()); ASSERT_EQ(ret, 0); - // 7. 先睡眠5s,让chunkserver选出leader + // 7. 
Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); s3Adapter_.Init(kS3ConfigPath); @@ -417,10 +413,11 @@ class CSCloneRecoverTest : public ::testing::Test { system(("mkdir " + CHUNKSERVER2_BASE_DIR + "/filepool").c_str())); } - /**下发一个写请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @return: IO是否成功完成 + /** + * Issue a write request and wait for its completion. + * @param offset: The offset for the current IO to be issued. + * @param size: The size of the IO to be issued. + * @return: Whether the IO has been successfully completed. */ bool HandleAioWriteRequest(uint64_t offset, uint64_t size, const char* data) { @@ -432,7 +429,8 @@ class CSCloneRecoverTest : public ::testing::Test { char* buffer = reinterpret_cast(context->buf); delete[] buffer; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -447,8 +445,7 @@ class CSCloneRecoverTest : public ::testing::Test { int ret; if ((ret = AioWrite(fd_, context))) { - LOG(ERROR) << "failed to send aio write request, err=" - << ret; + LOG(ERROR) << "failed to send aio write request, err=" << ret; return false; } @@ -460,11 +457,12 @@ class CSCloneRecoverTest : public ::testing::Test { return true; } - /**下发一个读请求并等待完成 - * @param: offset是当前需要下发IO的偏移 - * @param: size是下发IO的大小 - * @data: 读出的数据 - * @return: IO是否成功完成 + /** + * Issue a read request and wait for its completion. + * @param offset: The current offset for the IO to be issued. + * @param size: The size of the IO to be issued. + * @param data: The read data. + * @return Whether the IO is successfully completed. */ bool HandleAioReadRequest(uint64_t offset, uint64_t size, char* data) { gCond.Reset(1); @@ -473,7 +471,8 @@ class CSCloneRecoverTest : public ::testing::Test { auto readCallBack = [](CurveAioContext* context) { gIoRet = context->ret; delete context; - // 无论IO是否成功,只要返回,就触发cond + // Regardless of whether IO is successful or not, as long as it + // returns, it triggers cond gCond.Signal(); }; @@ -485,8 +484,7 @@ class CSCloneRecoverTest : public ::testing::Test { context->cb = readCallBack; int ret; if ((ret = AioRead(fd_, context))) { - LOG(ERROR) << "failed to send aio read request, err=" - << ret; + LOG(ERROR) << "failed to send aio read request, err=" << ret; return false; } @@ -547,7 +545,7 @@ class CSCloneRecoverTest : public ::testing::Test { return -1; } - // 先睡眠5s,让chunkserver选出leader + // Sleep for 5 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(5)); status = curve::chunkserver::GetLeader(logicPoolId_, copysetId_, csConf, &leaderPeer_); @@ -559,26 +557,26 @@ class CSCloneRecoverTest : public ::testing::Test { } void prepareSourceDataInCurve() { - // 创建一个curveFS文件 + // Create a curveFS file LOG(INFO) << "create source curveFS file: " << CURVEFS_FILENAME; fd_ = curve::test::FileCommonOperation::Open(CURVEFS_FILENAME, "curve"); ASSERT_NE(fd_, -1); - // 写数据到curveFS的第1个chunk + // Write data to the first chunk of curveFS LOG(INFO) << "Write first 16MB of source curveFS file"; ASSERT_TRUE(HandleAioWriteRequest(0, kChunkSize, chunkData1_.c_str())); - // 读出数据进行验证 + // Read data for verification std::unique_ptr temp(new char[kChunkSize]); ASSERT_TRUE(HandleAioReadRequest(0, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData1_.c_str(), temp.get(), kChunkSize)); - // 写数据到curveFS的第2个chunk + // Write data to the second chunk of curveFS 
LOG(INFO) << "Write second 16MB of source curveFS file"; ASSERT_TRUE( HandleAioWriteRequest(kChunkSize, kChunkSize, chunkData2_.c_str())); - // 读出数据进行验证 + // Read data for verification ASSERT_TRUE(HandleAioReadRequest(kChunkSize, kChunkSize, temp.get())); ASSERT_EQ(0, strncmp(chunkData2_.c_str(), temp.get(), kChunkSize)); @@ -613,14 +611,14 @@ class CSCloneRecoverTest : public ::testing::Test { bool s3ObjExisted_; }; -// 场景一:通过ReadChunk从curve恢复克隆文件 +// Scenario 1: Restore clone files from curve through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { LOG(INFO) << "current case: CloneFromCurveByReadChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; ChunkID cloneChunk2 = 332; @@ -633,7 +631,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -647,7 +645,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -667,17 +665,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * After traversing a clone file, it will not be converted into a regular + * chunk file. Writing is performed by incrementing the version number: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -685,19 +684,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景二:通过RecoverChunk从curve恢复克隆文件 +// Scenario 2: Restore cloned files from curve through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { LOG(INFO) << "current case: CloneFromCurveByRecoverChunk"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. 创建克隆文件 + // 1. 
Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 333; ChunkID cloneChunk2 = 334; @@ -710,7 +709,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -724,7 +723,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -750,16 +749,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件,写成功。 + * It is expected that the cloned file will be transformed into a regular + * chunk1 file. Writing is performed by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated, and the + * write will succeed. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -767,19 +768,19 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景三:lazy allocate场景下读克隆文件 +// Scenario 3: Lazy allocate scenario: Reading clone files TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { LOG(INFO) << "current case: CloneFromCurveByReadChunkWhenLazyAlloc"; - // 0. 在curve中写入源数据 + // 0. Write source data in curve prepareSourceDataInCurve(); - // 1. chunk文件不存在 + // 1. Chunk file does not exist ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 331; SequenceNum sn1 = 1; @@ -802,9 +803,9 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 将leader切换到follower + // Switch leader to follower ASSERT_EQ(0, TransferLeaderToFollower()); - // 2. 通过readchunk恢复克隆文件 + // 2. 
Restoring cloned files through readchunk ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); @@ -817,7 +818,7 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get(), CURVEFS_FILENAME, 0)); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk string ioBuf(kChunkServerMaxIoSize, 'c'); for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyWriteChunk( @@ -828,9 +829,10 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { cloneData1.get(), CURVEFS_FILENAME, 0)); /** - * clone文件遍写后会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * After traversing and writing a clone file, it will be transformed into a + * regular chunk file. Writing is performed by incrementing the version: + * - If it is a clone chunk, the write operation will fail; + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -838,18 +840,18 @@ TEST_F(CSCloneRecoverTest, CloneFromCurveByReadChunkWhenLazyAlloc) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); } -// 场景四:通过ReadChunk从S3恢复克隆文件 +// Scenario 4: Restore cloned files from S3 through ReadChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { LOG(INFO) << "current case: CloneFromS3ByReadChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 335; ChunkID cloneChunk2 = 336; @@ -862,7 +864,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -875,7 +877,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, cloneData1.get())); @@ -895,17 +897,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn1, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件遍读后不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * It is expected that after a clone file is traversed, it will not be + * converted to a regular chunk file. Write operations are performed by + * increasing the version: + * - If it is a clone chunk, the write operation will fail. + * - If it is a regular chunk, a snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -913,19 +917,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn1)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景五:通过RecoverChunk从S3恢复克隆文件 +// Scenario 5: Restore cloned files from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { LOG(INFO) << "current case: CloneFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 337; ChunkID cloneChunk2 = 338; @@ -938,7 +942,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn0, sn1, kChunkSize)); ASSERT_EQ(0, @@ -951,7 +955,7 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk2, sn1, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 8 * KB, @@ -977,16 +981,17 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn1, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn1, NULL_SN, string(""))); @@ -994,19 +999,19 @@ TEST_F(CSCloneRecoverTest, CloneFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn2, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn2)); ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk2, sn1)); } -// 场景六:通过ReadChunk从S3恢复 +// Scenario 6: Restore from S3 through ReadChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { LOG(INFO) << "current case: RecoverFromS3ByReadChunk"; - // 0. 构造数据上传到S3,模拟转储 + // 0. Upload construction data to S3 and simulate dump prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 339; SequenceNum sn2 = 2; @@ -1018,13 +1023,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过readchunk恢复克隆文件 + // 2. 
Restoring cloned files through readchunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, cloneData1.get())); @@ -1044,17 +1049,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过ReadChunk读遍clone chunk1的所有pages + // Read through all pages of clone chunk1 through ReadChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { - ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, - kChunkServerMaxIoSize, - cloneData1.get())); + ASSERT_EQ( + 0, verify.VerifyReadChunk(cloneChunk1, sn3, offset, + kChunkServerMaxIoSize, cloneData1.get())); } /** - * 预期clone文件不会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file not to be converted to regular chunk1 file + * Write by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. */ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1062,18 +1068,18 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByReadChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn3)); } -// 场景七:通过RecoverChunk从S3恢复 +// Scenario 7: Recovering from S3 through RecoverChunk TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { LOG(INFO) << "current case: RecoverFromS3ByRecoverChunk"; - // 0. 在S3中写入源数据 + // 0. Write source data in S3 prepareSourceDataInS3(); - // 1. 创建克隆文件 + // 1. Create Clone File ChunkServiceVerify verify(&opConf_); ChunkID cloneChunk1 = 341; SequenceNum sn2 = 2; @@ -1085,13 +1091,13 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 重复克隆 + // Duplicate cloning ASSERT_EQ(0, verify.VerifyCreateCloneChunk(cloneChunk1, location, sn3, sn2, kChunkSize)); ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn2, NULL_SN, string(""))); - // 2. 通过RecoverChunk恢复克隆文件 + // 2. Recovering cloned files through RecoverChunk std::shared_ptr cloneData1(new string(chunkData1_)); ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, 0, 8 * KB)); ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 8 * KB, @@ -1117,16 +1123,17 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { ASSERT_EQ(0, verify.VerifyReadChunk(cloneChunk1, sn3, 0, 12 * KB, cloneData1.get())); - // 通过RecoverChunk恢复clone chunk1的所有pages + // Restore all pages of clone chunk1 through RecoverChunk for (int offset = 0; offset < kChunkSize; offset += kChunkServerMaxIoSize) { ASSERT_EQ(0, verify.VerifyRecoverChunk(cloneChunk1, offset, kChunkServerMaxIoSize)); } /** - * 预期clone文件会转换为普通chunk1文件 - * 通过增大版本进行写入, - * 如果是clone chunk,写会失败; 如果是普通chunk,则会产生快照文件。 + * Expected clone file to be converted to regular chunk1 file + * Write by increasing the version: + * - If it is a clone chunk, the write will fail; + * - If it is a regular chunk, a snapshot file will be generated. 
*/ ASSERT_EQ(0, verify.VerifyGetChunkInfo(cloneChunk1, sn3, NULL_SN, string(""))); @@ -1134,7 +1141,7 @@ TEST_F(CSCloneRecoverTest, RecoverFromS3ByRecoverChunk) { verify.VerifyWriteChunk(cloneChunk1, sn4, 0, 8 * KB, temp.c_str(), nullptr)); - // 删除文件 + // Delete files ASSERT_EQ(0, verify.VerifyDeleteChunk(cloneChunk1, sn4)); } diff --git a/test/integration/chunkserver/chunkserver_concurrent_test.cpp b/test/integration/chunkserver/chunkserver_concurrent_test.cpp index a5ac75a823..a79c13eeaa 100644 --- a/test/integration/chunkserver/chunkserver_concurrent_test.cpp +++ b/test/integration/chunkserver/chunkserver_concurrent_test.cpp @@ -21,72 +21,60 @@ */ #include -#include #include +#include #include -#include "src/chunkserver/copyset_node_manager.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node_manager.h" +#include "src/common/concurrent/concurrent.h" #include "src/fs/fs_common.h" #include "src/fs/local_filesystem.h" -#include "src/common/concurrent/concurrent.h" -#include "test/integration/common/peer_cluster.h" #include "test/chunkserver/datastore/filepool_helper.h" #include "test/integration/common/config_generator.h" +#include "test/integration/common/peer_cluster.h" namespace curve { namespace chunkserver { +using curve::common::Thread; +using curve::fs::FileSystemType; using curve::fs::LocalFileSystem; using curve::fs::LocalFsFactory; -using curve::fs::FileSystemType; -using curve::common::Thread; static const char* kFakeMdsAddr = "127.0.0.1:9329"; constexpr uint32_t kOpRequestAlignSize = 4096; -static const char *chunkConcurrencyParams1[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9076", - "-chunkServerStoreUri=local://./9076/", - "-chunkServerMetaUri=local://./9076/chunkserver.dat", - "-copySetUri=local://./9076/copysets", - "-raftSnapshotUri=curve://./9076/copysets", - "-raftLogUri=curve://./9076/copysets", - "-recycleUri=local://./9076/recycler", - "-chunkFilePoolDir=./9076/chunkfilepool/", - "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", - "-walFilePoolDir=./9076/walfilepool/", - "-walFilePoolMetaPath=./9076/walfilepool.meta", - "-conf=./9076/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const char* chunkConcurrencyParams1[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9076", + "-chunkServerStoreUri=local://./9076/", + "-chunkServerMetaUri=local://./9076/chunkserver.dat", + "-copySetUri=local://./9076/copysets", + "-raftSnapshotUri=curve://./9076/copysets", + "-raftLogUri=curve://./9076/copysets", + "-recycleUri=local://./9076/recycler", + "-chunkFilePoolDir=./9076/chunkfilepool/", + "-chunkFilePoolMetaPath=./9076/chunkfilepool.meta", + "-walFilePoolDir=./9076/walfilepool/", + "-walFilePoolMetaPath=./9076/walfilepool.meta", + "-conf=./9076/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; -static const char *chunkConcurrencyParams2[1][16] = { - { - "chunkserver", - "-chunkServerIp=127.0.0.1", - "-chunkServerPort=9077", - "-chunkServerStoreUri=local://./9077/", - "-chunkServerMetaUri=local://./9077/chunkserver.dat", - "-copySetUri=local://./9077/copysets", - "-raftSnapshotUri=curve://./9077/copysets", - "-raftLogUri=curve://./9077/copysets", - "-recycleUri=local://./9077/recycler", - "-chunkFilePoolDir=./9077/chunkfilepool/", - "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", - "-walFilePoolDir=./9077/walfilepool/", - "-walFilePoolMetaPath=./9077/walfilepool.meta", - "-conf=./9077/chunkserver.conf", - "-raft_sync_segments=true", - NULL - }, +static const 
char* chunkConcurrencyParams2[1][16] = { + {"chunkserver", "-chunkServerIp=127.0.0.1", "-chunkServerPort=9077", + "-chunkServerStoreUri=local://./9077/", + "-chunkServerMetaUri=local://./9077/chunkserver.dat", + "-copySetUri=local://./9077/copysets", + "-raftSnapshotUri=curve://./9077/copysets", + "-raftLogUri=curve://./9077/copysets", + "-recycleUri=local://./9077/recycler", + "-chunkFilePoolDir=./9077/chunkfilepool/", + "-chunkFilePoolMetaPath=./9077/chunkfilepool.meta", + "-walFilePoolDir=./9077/walfilepool/", + "-walFilePoolMetaPath=./9077/walfilepool.meta", + "-conf=./9077/chunkserver.conf", "-raft_sync_segments=true", NULL}, }; butil::AtExitManager atExitManager; @@ -94,7 +82,7 @@ const int kChunkNum = 10; const ChunkSizeType kChunkSize = 16 * 1024 * 1024; const PageSizeType kPageSize = kOpRequestAlignSize; -// chunk不从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency testing for chunks not obtained from FilePool class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -136,14 +124,14 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -162,10 +150,10 @@ class ChunkServerConcurrentNotFromFilePoolTest : public testing::Test { CopysetID copysetId; std::map paramsIndexs; - std::vector params; + std::vector params; }; -// chunk从FilePool获取的chunkserver并发测试 +// Chunkserver concurrency test obtained by chunk from FilePool class ChunkServerConcurrentFromFilePoolTest : public testing::Test { protected: virtual void SetUp() { @@ -182,7 +170,6 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { electionTimeoutMs = 3000; snapshotIntervalS = 60; - ASSERT_TRUE(cg1.Init("9077")); cg1.SetKV("copyset.election_timeout_ms", "3000"); cg1.SetKV("copyset.snapshot_interval_s", "60"); @@ -198,14 +185,12 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { params.push_back(const_cast(chunkConcurrencyParams2[0])); - // 初始化FilePool,这里会预先分配一些chunk + // Initialize FilePool, where some chunks will be pre allocated lfs = LocalFsFactory::CreateFs(FileSystemType::EXT4, ""); - poolDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool/"; - metaDir = "./" - + std::to_string(PeerCluster::PeerToId(peer1)) - + "/chunkfilepool.meta"; + poolDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool/"; + metaDir = "./" + std::to_string(PeerCluster::PeerToId(peer1)) + + "/chunkfilepool.meta"; FilePoolMeta meta(kChunkSize, kPageSize, poolDir); FilePoolHelper::PersistEnCodeMetaInfo(lfs, meta, metaDir); @@ -213,7 +198,7 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // There maybe one chunk in cleaning, so you should allocate // (kChunkNum + 1) chunks in start if you want to use kChunkNum chunks. 
// This situation will not occur in the production environment - allocateChunk(lfs, kChunkNum+1, poolDir, kChunkSize); + allocateChunk(lfs, kChunkNum + 1, poolDir, kChunkSize); } virtual void TearDown() { std::string rmdir1("rm -fr "); @@ -224,14 +209,14 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { // wait for process exit ::usleep(100 * 1000); } - void InitCluster(PeerCluster *cluster) { + void InitCluster(PeerCluster* cluster) { PeerId leaderId; Peer leaderPeer; cluster->SetElectionTimeoutMs(electionTimeoutMs); cluster->SetsnapshotIntervalS(snapshotIntervalS); ASSERT_EQ(0, cluster->StartPeer(peer1, PeerCluster::PeerToId(peer1))); - // 等待leader产生 + // Waiting for the leader to be generated ASSERT_EQ(0, cluster->WaitLeader(&leaderPeer)); ASSERT_EQ(0, leaderId.parse(leaderPeer.address())); ASSERT_STREQ(peer1.address().c_str(), leaderId.to_string().c_str()); @@ -243,28 +228,23 @@ class ChunkServerConcurrentFromFilePoolTest : public testing::Test { std::vector peers; PeerId leaderId; Peer leaderPeer; - int electionTimeoutMs; - int snapshotIntervalS; + int electionTimeoutMs; + int snapshotIntervalS; LogicPoolID logicPoolId; - CopysetID copysetId; + CopysetID copysetId; - std::map paramsIndexs; - std::vector params; + std::map paramsIndexs; + std::vector params; std::string poolDir; std::string metaDir; - std::shared_ptr lfs; + std::shared_ptr lfs; }; -// 写chunk -int WriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkId, - off_t offset, - size_t len, - const char *data, +// Write chunk +int WriteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkId, off_t offset, size_t len, const char* data, const int sn = 1) { PeerId leaderId(leader.address()); brpc::Channel channel; @@ -299,13 +279,9 @@ int WriteChunk(Peer leader, return 0; } -// 随机选择一个chunk的随机offset进行read -void RandReadChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for read +void RandReadChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; uint64_t appliedIndex = 1; PeerId leaderId(leader.address()); @@ -314,7 +290,7 @@ void RandReadChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -330,7 +306,7 @@ void RandReadChunk(Peer leader, request.set_size(kOpRequestAlignSize); request.set_appliedindex(appliedIndex); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -342,7 +318,8 @@ void RandReadChunk(Peer leader, } if (response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS && - response.status() != CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { //NOLINT + response.status() != + CHUNK_OP_STATUS::CHUNK_OP_STATUS_CHUNK_NOTEXIST) { // NOLINT LOG(INFO) << "read failed: " << CHUNK_OP_STATUS_Name(response.status()); ret = -1; @@ -352,13 +329,9 @@ void RandReadChunk(Peer leader, } } -// 随机选择一个chunk的随机offset进行write -void RandWriteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop, - const int sn = 1) { +// Randomly select a chunk's random offset for writing +void RandWriteChunk(Peer leader, LogicPoolID logicPoolId, 
CopysetID copysetId, + ChunkID chunkIdRange, const int loop, const int sn = 1) { int ret = 0; char data[kOpRequestAlignSize] = {'a'}; int length = kOpRequestAlignSize; @@ -369,7 +342,7 @@ void RandWriteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -385,7 +358,7 @@ void RandWriteChunk(Peer leader, request.set_size(kOpRequestAlignSize); cntl.request_attachment().append(data, length); - // 随机选择一个offset + // Randomly select an offset uint64_t pageIndex = butil::fast_rand_less_than(kChunkSize / kPageSize); request.set_offset(pageIndex * kPageSize); @@ -406,12 +379,9 @@ void RandWriteChunk(Peer leader, } } -// 随机选择一个chunk删除 -void RandDeleteChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID chunkIdRange, - const int loop) { +// Randomly select a chunk to delete +void RandDeleteChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID chunkIdRange, const int loop) { int ret = 0; PeerId leaderId(leader.address()); @@ -420,7 +390,7 @@ void RandDeleteChunk(Peer leader, ChunkService_Stub stub(&channel); for (int i = 0; i < loop; ++i) { - // 随机选择一个chunk + // Randomly select a chunk ChunkID chunkId = butil::fast_rand_less_than(chunkIdRange); chunkId += 1; @@ -450,12 +420,9 @@ void RandDeleteChunk(Peer leader, } } -// 创建clone chunk -void CreateCloneChunk(Peer leader, - LogicPoolID logicPoolId, - CopysetID copysetId, - ChunkID start, - ChunkID end) { +// Create clone chunk +void CreateCloneChunk(Peer leader, LogicPoolID logicPoolId, CopysetID copysetId, + ChunkID start, ChunkID end) { int ret = 0; SequenceNum sn = 2; SequenceNum correctedSn = 1; @@ -497,10 +464,10 @@ void CreateCloneChunk(Peer leader, } /** - * chunk不是事先在FilePool分配好的 + * Chunks are not pre allocated in FilePool */ -// 多线程并发随机读同一个chunk +// Multiple threads simultaneously randomly read the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { uint64_t chunkId = 1; off_t offset = 0; @@ -511,37 +478,21 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); + // 2. Initiate a write to the chunk to ensure that it has been generated + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, chunkId, offset, + length, data.c_str(), sn)); - // 2. 对chunk发起一次写,保证chunk已经产生 - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - data.c_str(), - sn)); - - // 3. 起多个线程执行随机read chunk + // 3. 
Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -549,33 +500,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadOneChunk) { } } -// 多线程并发随机写同一个chunk +// Multiple threads concurrently randomly write the same chunk TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -583,8 +525,9 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentNotFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -592,29 +535,19 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -622,7 +555,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 3. 将数据read出来验证 + // 3. 
Read out the data for verification
     brpc::Channel channel;
     channel.Init(leaderId.addr, NULL);
     ChunkService_Stub stub(&channel);
@@ -646,7 +579,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) {
     std::string result = cntl.response_attachment().to_string();
     ASSERT_EQ(length, result.size());

-    // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1
+    // The characters of the read data are >= 'a' and <= 'a' + kThreadNum - 1
     ASSERT_GE(result[0] - 'a', 0);
     ASSERT_LE(result[0] - 'a', kThreadNum - 1);
     for (int i = 1; i < length - 1; ++i) {
@@ -654,7 +587,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, WriteOneChunkOnTheSameOffset) {
     }
 }
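The assertions above encode the atomicity invariant for concurrent same-offset
writes: the chunk must end up holding exactly one writer's fill pattern, never
an interleaving of several. A minimal standalone sketch of the same check, with
result and kThreadNum as in the test above:

    // Writer i fills the whole range with the single byte 'a' + i, so an
    // atomic write implies every byte of the readback equals the first one.
    void CheckSingleWriterPattern(const std::string& result, int threadNum) {
        ASSERT_GE(result[0] - 'a', 0);              // written by some thread
        ASSERT_LE(result[0] - 'a', threadNum - 1);  // a valid thread index
        for (size_t i = 1; i < result.size(); ++i) {
            ASSERT_EQ(result[i], result[0]);        // no torn or mixed bytes
        }
    }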
-// 多线程并发随机读写同一个chunk
+// Multiple threads concurrently randomly read and write the same chunk
 TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) {
     off_t offset = 0;
     int length = kOpRequestAlignSize;
@@ -664,50 +597,30 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) {
     ChunkID chunkIdRange = 1;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 对chunk发起一次写,保证chunk已经产生
+    // 2. Initiate a write to the chunk to ensure that it has been generated
     for (int k = 1; k < chunkIdRange + 1; ++k) {
-        ASSERT_EQ(0, WriteChunk(leaderPeer,
-                                logicPoolId,
-                                copysetId,
-                                k,
-                                offset,
-                                length,
-                                data.c_str(),
-                                sn));
+        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset,
+                                length, data.c_str(), sn));
     }

-    // 3. 起多个线程执行随机read write chunk
+    // 3. Starting multiple threads to execute random read write chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
         int read = butil::fast_rand_less_than(2);
         if (read) {
-            // 起read线程
-            threads.push_back(Thread(RandReadChunk,
-                                     leaderPeer,
-                                     logicPoolId,
-                                     copysetId,
-                                     chunkIdRange,
-                                     kMaxLoop,
-                                     sn));
+            // Start read thread
+            threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId,
+                                     copysetId, chunkIdRange, kMaxLoop, sn));
         } else {
-            // 起write线程
-            threads.push_back(Thread(RandWriteChunk,
-                                     leaderPeer,
-                                     logicPoolId,
-                                     copysetId,
-                                     chunkIdRange,
-                                     kMaxLoop,
-                                     sn));
+            // Start write thread
+            threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId,
+                                     copysetId, chunkIdRange, kMaxLoop, sn));
         }
     }

@@ -716,7 +629,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteOneChunk) {
     }
 }

-// 多线程并发读不同的chunk
+// Multiple threads concurrently reading different chunks
 TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) {
     off_t offset = 0;
     int length = kOpRequestAlignSize;
@@ -726,38 +639,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 对chunk发起一次写,保证chunk已经产生
+    // 2. Initiate a write to the chunk to ensure that it has been generated
     for (int k = 1; k < chunkIdRange + 1; ++k) {
-        ASSERT_EQ(0, WriteChunk(leaderPeer,
-                                logicPoolId,
-                                copysetId,
-                                k,
-                                offset,
-                                length,
-                                data.c_str(),
-                                sn));
+        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset,
+                                length, data.c_str(), sn));
     }

-    // 3. 起多个线程执行随机read chunk
+    // 3. Starting multiple threads to execute random read chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(RandReadChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop,
-                                 sn));
+        threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop, sn));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -765,33 +663,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiChunk) {
     }
 }

-// 多线程并发读不同的chunk,注意这些chunk都还没有被写过
-TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) { //NOLINT
+// Multiple threads simultaneously read different chunks, please note that none
+// of these chunks have been written yet
+TEST_F(ChunkServerConcurrentNotFromFilePoolTest,
+       RandReadMultiNotExistChunk) {  // NOLINT
     const int kThreadNum = 10;
     const int kMaxLoop = 200;
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机read chunk
+    // 2. Starting multiple threads to execute random read chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(RandReadChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop,
-                                 sn));
+        threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop, sn));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -799,7 +690,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadMultiNotExistChunk) {
     }
 }
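The random read and write helpers above (RandReadChunk, RandWriteChunk) derive
their I/O position the same way: pick a random page index and scale it by the
page size, so every request stays aligned to kOpRequestAlignSize. A condensed
sketch of that derivation, using butil::fast_rand_less_than as the tests do:

    // Returns an offset that is uniformly random over the chunk but always
    // pageSize-aligned, matching the alignment the chunkserver requires.
    uint64_t RandomAlignedOffset(uint64_t chunkSize, uint64_t pageSize) {
        uint64_t pageIndex = butil::fast_rand_less_than(chunkSize / pageSize);
        return pageIndex * pageSize;
    }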
-// 多线程并发随机写同多个chunk
+// Multiple threads concurrently and randomly write to multiple chunks
 TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) {
     off_t offset = 0;
     int length = kOpRequestAlignSize;
@@ -809,39 +700,26 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 对chunk发起一次写,保证chunk已经产生,避免下面同时从
-    // chunkfile pool生成new chunk导致write 超时失败
+    // 2. Write each chunk once so that every chunk already exists;
+    //    otherwise the concurrent writers below would all allocate new
+    //    chunks from the chunkfile pool at the same time, and the writes
+    //    could fail with timeouts
     for (int k = 1; k < chunkIdRange + 1; ++k) {
-        ASSERT_EQ(0, WriteChunk(leaderPeer,
-                                logicPoolId,
-                                copysetId,
-                                k,
-                                offset,
-                                length,
-                                data.c_str(),
-                                sn));
+        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset,
+                                length, data.c_str(), sn));
     }

-    // 4. 起多个线程执行随机write chunk
+    // 4. Starting multiple threads to execute random write chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(RandWriteChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop,
-                                 sn));
+        threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop, sn));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -849,7 +727,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandWriteMultiChunk) {
     }
 }

-// 多线程并发随机读写同多个chunk
+// Multiple threads concurrently and randomly read and write multiple chunks
 TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) {
     std::string data(kOpRequestAlignSize, 'a');
     const int kThreadNum = 10;
@@ -857,38 +735,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机read write chunk
+    // 2. Starting multiple threads to execute random read write chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
         int read = butil::fast_rand_less_than(2);
         if (read) {
-            // 起read线程
-            threads.push_back(Thread(RandReadChunk,
-                                     leaderPeer,
-                                     logicPoolId,
-                                     copysetId,
-                                     chunkIdRange,
-                                     kMaxLoop,
-                                     sn));
+            // Start read thread
+            threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId,
+                                     copysetId, chunkIdRange, kMaxLoop, sn));
         } else {
-            // 起write线程
-            threads.push_back(Thread(RandWriteChunk,
-                                     leaderPeer,
-                                     logicPoolId,
-                                     copysetId,
-                                     chunkIdRange,
-                                     kMaxLoop,
-                                     sn));
+            // Start write thread
+            threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId,
+                                     copysetId, chunkIdRange, kMaxLoop, sn));
         }
     }

@@ -897,7 +761,7 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, RandReadWriteMultiChunk) {
     }
 }
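Every case in this file follows the shape the numbered comments describe:
start a single-member replication group, optionally seed each chunk with one
write, then fan out worker threads and join them. A condensed sketch of that
skeleton, assuming the PeerCluster, Thread, WriteChunk and RandWriteChunk
helpers and the local variables of the surrounding tests:

    // 1. bring up a one-member copyset and wait for the leader
    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
                        params, paramsIndexs);
    ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
    InitCluster(&cluster);
    // 2. seed every chunk so no worker pays the first-write allocation cost
    for (int k = 1; k <= kChunkNum; ++k) {
        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, 0,
                                kOpRequestAlignSize, data.c_str(), sn));
    }
    // 3. fan out the workers, then join them all
    std::vector<Thread> threads;
    for (int i = 0; i < kThreadNum; ++i) {
        threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId,
                                 copysetId, kChunkNum, kMaxLoop, sn));
    }
    for (auto& t : threads) {
        t.join();
    }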
-// 多线程并发删除不同的chunk
+// Multiple threads concurrently delete different chunks
 TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) {
     off_t offset = 0;
     int length = kOpRequestAlignSize;
@@ -907,38 +771,24 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 对chunk发起一次写,保证chunk已经产生
+    // 2. Initiate a write to the chunk to ensure that it has been generated
     for (int k = 1; k < chunkIdRange + 1; ++k) {
-        ASSERT_EQ(0, WriteChunk(leaderPeer,
-                                logicPoolId,
-                                copysetId,
-                                k,
-                                offset,
-                                length,
-                                data.c_str(),
-                                sn));
+        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset,
+                                length, data.c_str(), sn));
     }

-    // 3. 起多个线程执行随机delete chunk
+    // 3. Starting multiple threads to execute random delete chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        // 起delete线程
-        threads.push_back(Thread(RandDeleteChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop));
+        // Start delete thread
+        threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop));
    }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -946,30 +796,23 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, DeleteMultiChunk) {
     }
 }

-// 多线程并发create clone不同的chunk
+// Multiple threads concurrently create clones of different chunks
 TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) {
     const int kThreadNum = 10;
     ChunkID chunkIdRange = kChunkNum;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机create clone chunk
+    // 2. Starting multiple threads to execute random create clone chunks
     std::vector<Thread> threads;
     int chunksPerThread = chunkIdRange / kThreadNum;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(CreateCloneChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 i * chunksPerThread + 1,
+        threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId,
+                                 copysetId, i * chunksPerThread + 1,
                                  (i + 1) * chunksPerThread));
     }

@@ -979,10 +822,10 @@ TEST_F(ChunkServerConcurrentNotFromFilePoolTest, CreateCloneMultiChunk) {
     }
 }

 /**
- * chunk是事先在FilePool分配好的
+ * Chunks are pre-allocated in FilePool
  */

-// 多线程并发随机读同一个chunk
+// Multiple threads simultaneously randomly read the same chunk
 TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) {
     uint64_t chunkId = 1;
     off_t offset = 0;
@@ -993,36 +836,21 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadOneChunk) {
     ChunkID chunkIdRange = 1;
     const int sn = 1;

-    // 1. 
启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1064,8 +883,9 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteOneChunk) { } } -// 多线程并发写同一个chunk同一个offset -TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { //NOLINT +// Multiple threads simultaneously writing the same chunk and offset +TEST_F(ChunkServerConcurrentFromFilePoolTest, + WriteOneChunkOnTheSameOffset) { // NOLINT const int kThreadNum = 10; std::vector datas; ChunkID chunkId = 1; @@ -1073,29 +893,19 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { int length = 2 * kOpRequestAlignSize; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机write chunk + // 2. Starting multiple threads to execute random write chunk std::vector threads; for (int i = 0; i < kThreadNum; ++i) { std::string data(length, 'a' + i); datas.push_back(data); - threads.push_back(Thread(WriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkId, - offset, - length, - datas[i].c_str(), + threads.push_back(Thread(WriteChunk, leaderPeer, logicPoolId, copysetId, + chunkId, offset, length, datas[i].c_str(), sn)); } @@ -1103,7 +913,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { threads[j].join(); } - // 4. 将数据read出来验证 + // 4. Read out the data for verification brpc::Channel channel; channel.Init(leaderId.addr, NULL); ChunkService_Stub stub(&channel); @@ -1127,7 +937,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { std::string result = cntl.response_attachment().to_string(); ASSERT_EQ(length, result.size()); - // 读出来的数据的字符>='a' 且<= 'a' + kThreadNum - 1 + // The characters of the read data >='a' and <= 'a' + kThreadNum - 1 ASSERT_GE(result[0] - 'a', 0); ASSERT_LE(result[0] - 'a', kThreadNum - 1); for (int i = 1; i < length - 1; ++i) { @@ -1135,7 +945,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, WriteOneChunkOnTheSameOffset) { } } -// 多线程并发随机读写同一个chunk +// Multiple threads concurrently randomly read and write the same chunk TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { std::string data(kOpRequestAlignSize, 'a'); const int kThreadNum = 10; @@ -1143,38 +953,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { ChunkID chunkIdRange = 1; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. 
Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read write chunk + // 2. Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(2); if (read) { - // 起read线程 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } @@ -1183,7 +979,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteOneChunk) { } } -// 多线程并发读不同的chunk +// Multiple threads concurrently reading different chunks TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { off_t offset = 0; int length = kOpRequestAlignSize; @@ -1193,38 +989,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 对chunk发起一次写,保证chunk已经产生 + // 2. Initiate a write to the chunk to ensure that it has been generated for (int k = 1; k < chunkIdRange + 1; ++k) { - ASSERT_EQ(0, WriteChunk(leaderPeer, - logicPoolId, - copysetId, - k, - offset, - length, - data.c_str(), - sn)); + ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset, + length, data.c_str(), sn)); } - // 4. 起多个线程执行随机read chunk + // 4. Starting multiple threads to execute random read chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } for (int j = 0; j < kThreadNum; ++j) { @@ -1232,33 +1013,25 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiChunk) { } } -// 多线程并发读不同的chunk,注意这些chunk都还没有被写过 +// Multiple threads simultaneously read different chunks, please note that none +// of these chunks have been written yet TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) { const int kThreadNum = 10; const int kMaxLoop = 200; ChunkID chunkIdRange = kChunkNum; const int sn = 1; - // 1. 启动一个成员的复制组 - PeerCluster cluster("InitShutdown-cluster", - logicPoolId, - copysetId, - peers, - params, - paramsIndexs); + // 1. Start a replication group for a member + PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers, + params, paramsIndexs); ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr)); InitCluster(&cluster); - // 2. 起多个线程执行随机read chunk + // 2. 
Starting multiple threads to execute random read chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(RandReadChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop,
-                                 sn));
+        threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop, sn));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -1266,7 +1039,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadMultiNotExistChunk) {
     }
 }

-// 多线程并发随机写同多个chunk
+// Multiple threads concurrently and randomly write to multiple chunks
 TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) {
     std::string data(kOpRequestAlignSize, 'a');
     const int kThreadNum = 10;
@@ -1274,26 +1047,17 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机write chunk
+    // 2. Starting multiple threads to execute random write chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(RandWriteChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop,
-                                 sn));
+        threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop, sn));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -1301,7 +1065,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunk) {
     }
 }

-// 多线程并发随机读写同多个chunk
+// Multiple threads concurrently and randomly read and write multiple chunks
 TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) {
     std::string data(kOpRequestAlignSize, 'a');
     const int kThreadNum = 10;
@@ -1309,38 +1073,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机read write chunk
+    // 2. Starting multiple threads to execute random read write chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
         int read = butil::fast_rand_less_than(2);
         if (read) {
-            // 起read线程
-            threads.push_back(Thread(RandReadChunk,
-                                     leaderPeer,
-                                     logicPoolId,
-                                     copysetId,
-                                     chunkIdRange,
-                                     kMaxLoop,
-                                     sn));
+            // Start read thread
+            threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId,
+                                     copysetId, chunkIdRange, kMaxLoop, sn));
         } else {
-            // 起write线程
-            threads.push_back(Thread(RandWriteChunk,
-                                     leaderPeer,
-                                     logicPoolId,
-                                     copysetId,
-                                     chunkIdRange,
-                                     kMaxLoop,
-                                     sn));
+            // Start write thread
+            threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId,
+                                     copysetId, chunkIdRange, kMaxLoop, sn));
         }
     }

@@ -1349,7 +1099,7 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandReadWriteMultiChunk) {
     }
 }

-// 多线程并发删除不同的chunk
+// Multiple threads concurrently delete different chunks
 TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) {
     off_t offset = 0;
     int length = kOpRequestAlignSize;
@@ -1359,38 +1109,24 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) {
     ChunkID chunkIdRange = kChunkNum;
     const int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 对chunk发起一次写,保证chunk已经产生
+    // 2. Initiate a write to the chunk to ensure that it has been generated
     for (int k = 1; k < chunkIdRange + 1; ++k) {
-        ASSERT_EQ(0, WriteChunk(leaderPeer,
-                                logicPoolId,
-                                copysetId,
-                                k,
-                                offset,
-                                length,
-                                data.c_str(),
-                                sn));
+        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset,
+                                length, data.c_str(), sn));
    }

-    // 3. 起多个线程执行随机delete chunk
+    // 3. Starting multiple threads to execute random delete chunks
     std::vector<Thread> threads;
     for (int i = 0; i < kThreadNum; ++i) {
-        // 起delete线程
-        threads.push_back(Thread(RandDeleteChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 chunkIdRange,
-                                 kMaxLoop));
+        // Start delete thread
+        threads.push_back(Thread(RandDeleteChunk, leaderPeer, logicPoolId,
+                                 copysetId, chunkIdRange, kMaxLoop));
     }

     for (int j = 0; j < kThreadNum; ++j) {
@@ -1398,30 +1134,23 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, DeleteMultiChunk) {
     }
 }

-// 多线程并发create clone不同的chunk
+// Multiple threads concurrently create clones of different chunks
 TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) {
     const int kThreadNum = 10;
     ChunkID chunkIdRange = kChunkNum;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 起多个线程执行随机create clone chunk
+    // 2. Starting multiple threads to execute random create clone chunks
     std::vector<Thread> threads;
     int chunksPerThread = chunkIdRange / kThreadNum;
     for (int i = 0; i < kThreadNum; ++i) {
-        threads.push_back(Thread(CreateCloneChunk,
-                                 leaderPeer,
-                                 logicPoolId,
-                                 copysetId,
-                                 i * chunksPerThread + 1,
+        threads.push_back(Thread(CreateCloneChunk, leaderPeer, logicPoolId,
+                                 copysetId, i * chunksPerThread + 1,
                                  (i + 1) * chunksPerThread));
     }

@@ -1430,7 +1159,8 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, CreateCloneMultiChunk) {
     }
 }

-// 多线程并发随机读写同多个chunk,同事伴随这并发的COW
+// Multiple threads concurrently and randomly read and write multiple chunks,
+// accompanied by concurrent COW
 TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) {
     off_t offset = 0;
     int length = kOpRequestAlignSize;
@@ -1440,52 +1170,32 @@ TEST_F(ChunkServerConcurrentFromFilePoolTest, RandWriteMultiChunkWithCOW) {
     ChunkID chunkIdRange = kChunkNum / 2;
     int sn = 1;

-    // 1. 启动一个成员的复制组
-    PeerCluster cluster("InitShutdown-cluster",
-                        logicPoolId,
-                        copysetId,
-                        peers,
-                        params,
-                        paramsIndexs);
+    // 1. Start a replication group for a member
+    PeerCluster cluster("InitShutdown-cluster", logicPoolId, copysetId, peers,
+                        params, paramsIndexs);
     ASSERT_EQ(0, cluster.StartFakeTopoloyService(kFakeMdsAddr));
     InitCluster(&cluster);

-    // 2. 用低版本的sn写一遍chunk
+    // 2. Write each chunk once with the lower version sn
     for (int k = 1; k <= chunkIdRange; ++k) {
-        ASSERT_EQ(0, WriteChunk(leaderPeer,
-                                logicPoolId,
-                                copysetId,
-                                k,
-                                offset,
-                                length,
-                                data.c_str(),
-                                sn));
+        ASSERT_EQ(0, WriteChunk(leaderPeer, logicPoolId, copysetId, k, offset,
+                                length, data.c_str(), sn));
    }

-    // sn加1,保证后面的write会产生COW
+    // Add 1 to sn to ensure that subsequent writes will generate COW
    sn += 1;

-    // 3. 起多个线程执行随机read write chunk
+    // 3. 
Starting multiple threads to execute random read write chunks std::vector threads; for (int i = 0; i < kThreadNum; ++i) { int read = butil::fast_rand_less_than(10); if (read <= 1) { - // 起read线程,20%概率 - threads.push_back(Thread(RandReadChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start read thread with a 20% probability + threads.push_back(Thread(RandReadChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } else { - // 起write线程 - threads.push_back(Thread(RandWriteChunk, - leaderPeer, - logicPoolId, - copysetId, - chunkIdRange, - kMaxLoop, - sn)); + // Start write thread + threads.push_back(Thread(RandWriteChunk, leaderPeer, logicPoolId, + copysetId, chunkIdRange, kMaxLoop, sn)); } } diff --git a/test/integration/client/chunkserver_exception_test.cpp b/test/integration/client/chunkserver_exception_test.cpp index aa676fc718..9653b4857a 100644 --- a/test/integration/client/chunkserver_exception_test.cpp +++ b/test/integration/client/chunkserver_exception_test.cpp @@ -20,22 +20,22 @@ * Author: tongguangxun */ -#include -#include #include +#include +#include -#include -#include -#include // NOLINT -#include // NOLINT -#include -#include #include +#include #include // NOLINT +#include +#include // NOLINT +#include +#include +#include // NOLINT -#include "src/common/timeutility.h" #include "include/client/libcurve.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -48,12 +48,12 @@ curve::client::InflightControl inflightContl; using curve::CurveCluster; const std::vector mdsConf{ - { "--confPath=./conf/mds.conf" }, - { "--log_dir=./runlog/ChunkserverException" }, - { "--mdsDbName=module_exception_curve_chunkserver" }, - { "--sessionInterSec=20" }, - { "--etcdAddr=127.0.0.1:22233" }, - { "--updateToRepoSec=5" }, + {"--confPath=./conf/mds.conf"}, + {"--log_dir=./runlog/ChunkserverException"}, + {"--mdsDbName=module_exception_curve_chunkserver"}, + {"--sessionInterSec=20"}, + {"--etcdAddr=127.0.0.1:22233"}, + {"--updateToRepoSec=5"}, }; const std::vector chunkserverConf4{ @@ -143,15 +143,16 @@ class CSModuleException : public ::testing::Test { cluster->PrepareConfig(confPath, clientConf); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, "127.0.0.1:22233", "127.0.0.1:22234", std::vector{ - "--name=module_exception_test_chunkserver" }); + "--name=module_exception_test_chunkserver"}); LOG(INFO) << "etcd 1 started on 127.0.0.1:22233:22234, pid = " << pid; ASSERT_GT(pid, 0); - // 2. 先启动一个mds,让其成为leader,然后再启动另外两个mds节点 + // 2. Start one mds first, make it a leader, and then start the other + // two mds nodes pid = cluster->StartSingleMDS(1, "127.0.0.1:22122", 22128, mdsConf, true); LOG(INFO) << "mds 1 started on 127.0.0.1:22122, pid = " << pid; @@ -168,7 +169,7 @@ class CSModuleException : public ::testing::Test { ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(8)); - // 3. 创建物理池 + // 3. Creating a physical pool std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + std::string( " -cluster_map=./test/integration/client/" @@ -190,7 +191,7 @@ class CSModuleException : public ::testing::Test { retry++; } - // 4. 创建chunkserver + // 4. 
         pid = cluster->StartSingleChunkServer(1, "127.0.0.1:22125",
                                               chunkserverConf4);
         LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
@@ -207,7 +208,8 @@ class CSModuleException : public ::testing::Test {
         ASSERT_GT(pid, 0);
         std::this_thread::sleep_for(std::chrono::seconds(5));
 
-        // 5. 创建逻辑池, 并睡眠一段时间让底层copyset先选主
+        // 5. Create a logical pool, then sleep for a while so that the
+        //    underlying copysets can elect their leaders first
         std::string createLPCmd =
             std::string("./bazel-bin/tools/curvefsTool") +
             std::string(
@@ -228,15 +230,15 @@ class CSModuleException : public ::testing::Test {
         }
         ASSERT_EQ(ret, 0);
 
-        // 6. 初始化client配置
+        // 6. Initialize the client configuration
         ret = Init(confPath.c_str());
         ASSERT_EQ(ret, 0);
 
-        // 7. 创建一个文件
+        // 7. Create a file
         fd = curve::test::FileCommonOperation::Open("/test1", "curve");
         ASSERT_NE(fd, -1);
 
-        // 8. 先睡眠10s,让chunkserver选出leader
+        // 8. Sleep for 10 seconds first to let the chunkservers elect leaders
         std::this_thread::sleep_for(std::chrono::seconds(10));
     }
@@ -282,12 +284,15 @@ class CSModuleException : public ::testing::Test {
     }
 
     /**
-     * 监测client io能否在预期时间内正常下发
-     * @param: off是当前需要下发IO的偏移
-     * @param: size是下发io的大小
-     * @param: predictTimeS是预期在多少秒内IO可以恢复
-     * @param[out]: failCount为当前io下发中错误返回的数量
-     * @return: 如果io在预期时间内能够正常下发,则返true,否则返回false
+     * Monitor whether client I/O can be issued within the expected time.
+     * @param off: The current offset for the I/O to be issued.
+     * @param size: The size of the I/O to be issued.
+     * @param predictTimeS: The expected time in seconds within which the I/O
+     *                      should recover.
+     * @param[out] failCount: The count of errors returned during the ongoing
+     *                        I/O.
+     * @return true if I/O can be issued normally within the expected time,
+     *         false otherwise.
     */
     bool MonitorResume(uint64_t off, uint64_t size, uint64_t predictTimeS,
                        uint64_t* failCount = nullptr) {
@@ -335,7 +340,7 @@ class CSModuleException : public ::testing::Test {
 
         failCount == nullptr ? 0 : (*failCount = ioFailedCount);
 
-        // 唤醒io线程
+        // Wake up the IO thread and wait for it to exit
         iothread.join();
         inflightContl.WaitInflightAllComeBack();
@@ -345,7 +350,7 @@ class CSModuleException : public ::testing::Test {
 
     int fd;
 
-    // 是否出现挂卸载失败
+    // Whether mounting or unmounting failed
     bool createOrOpenFailed;
     bool createDone;
     std::mutex createMtx;
@@ -354,173 +359,186 @@ class CSModuleException : public ::testing::Test {
     CurveCluster* cluster;
 };
 
-// 测试环境拓扑:在单节点上启动一个client、三个chunkserver、三个mds、一个etcd
+// Test environment topology: start one client, three chunkservers, three mds,
+// and one etcd on a single node
 TEST_F(CSModuleException, ChunkserverException) {
     LOG(INFO) << "current case: KillOneChunkserverThenRestartTheChunkserver";
 
     /********* KillOneChunkserverThenRestartTheChunkserver **********/
-    // 1. 测试重启一个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. kill一台chunkserver:client 读写请求最多卡顿
-    //       election_timeout*2s可以正常读写
-    //    c. 恢复chunkserver:client 读写请求无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting a chunkserver.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state, client read and write
+    //       requests can be issued normally.
+    //    b. Kill one chunkserver: client reads and writes stall at most
+    //       briefly and proceed normally within election_timeout * 2s.
+    //    c. Recover the chunkserver: client reads and writes are unaffected.
+    // 1. Initial state of the cluster, with I/O issued normally.
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
-    // 2. kill掉一个chunkserver
+    // 2. Kill a chunkserver
     ASSERT_EQ(0, cluster->StopChunkServer(1));
 
-    // 3. kill掉一个chunkserver之后,client的io预期最多会在2*electtime后恢复
+    // 3. After killing a chunkserver, client IO is expected to recover within
+    //    at most 2 * election_timeout
     ASSERT_TRUE(MonitorResume(0, 4096, 2));
 
-    // 4. 拉起刚才被kill的chunkserver
+    // 4. Bring back the chunkserver that was just killed
     pid_t pid =
         cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4);
     LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
     ASSERT_GT(pid, 0);
 
-    // 5. 重新拉起对client IO没有影响
+    // 5. Bringing it back has no impact on client IO
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
     LOG(INFO) << "current case: HangOneChunkserverThenResumeTheChunkserver";
 
     /********* HangOneChunkserverThenResumeTheChunkserver ***********/
-    // 1. hang一台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang一台chunkserver:client
-    //       读写请求最多卡顿election_timeout*2s可以正常读写
-    //    c. 恢复chunkserver:client 读写请求无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang one chunkserver, then recover the hung chunkserver.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state: client read and write
+    //       requests can be issued normally.
+    //    b. Hang one chunkserver: client reads and writes stall at most
+    //       briefly and proceed normally within election_timeout * 2s.
+    //    c. Recover the chunkserver: client reads and writes are unaffected.
+    // 1. Initial state of the cluster, with I/O issued normally.
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
-    // 2. hang一个chunkserver
+    // 2. Hang a chunkserver
     ASSERT_EQ(0, cluster->HangChunkServer(1));
 
-    // 3. hang一个chunkserver之后,client的io预期最多会在2*electtime后恢复
+    // 3. After hanging a chunkserver, client IO is expected to recover within
+    //    at most 2 * election_timeout
     ASSERT_TRUE(MonitorResume(0, 4096, 2));
 
-    // 4. 拉起刚才被hang的chunkserver
+    // 4. Recover the chunkserver that was just hung
     ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));
 
-    // 5. 重新拉起对client IO没有影响
+    // 5. Recovering it has no impact on client IO
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
     LOG(INFO) << "current case: KillTwoChunkserverThenRestartTheChunkserver";
 
     /******** KillTwoChunkserverThenRestartTheChunkserver *********/
-    // 1. 测试重启两个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. kill两台chunkserver:预期client IO持续hang,新写IO和覆盖写都hang
-    //       拉起被kill中的一台chunkserver:client IO预期在最多在
-    //       (chunkserver启动回放数据+2*election_timeout)时间内恢复读写
-    //    c. 拉起另外一台kill的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting two chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state: client read and write
+    //       requests can be issued normally.
+    //    b. Kill two chunkservers: client IO is expected to keep hanging, for
+    //       both new writes and overwrites. Bring back one of the killed
+    //       chunkservers: client IO is expected to resume within at most
+    //       (chunkserver startup data replay + 2 * election_timeout).
+    //    c. Bring back the other killed chunkserver: no impact on client IO.
+    // 1. Initial state of the cluster, with I/O issued normally.
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
-    // 2. kill掉两个chunkserver
+    // 2. Kill two chunkservers
     ASSERT_EQ(0, cluster->StopChunkServer(1));
     ASSERT_EQ(0, cluster->StopChunkServer(2));
 
-    // 3. kill掉两个chunkserver, io无法正常下发
+    // 3. With two chunkservers killed, IO cannot be issued normally
     ASSERT_FALSE(MonitorResume(0, 4096, 30));
 
-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Bring back the first of the two killed chunkservers
     pid =
         cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4);
     LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
     ASSERT_GT(pid, 0);
 
-    // 5. 拉起刚才被kill的chunkserver的第一个,
-    //    client的io预期最多会在2*electtime后恢复
-    //    如果配置了慢启动,则需要等待
+    // 5. After the first killed chunkserver is brought back,
+    //    client IO is expected to recover within at most 2 * election_timeout.
+    //    If slow start is configured, wait up to
     //    (copysetNum / load_concurrency) * election_timeout
     ASSERT_TRUE(MonitorResume(0, 4096, 80));
 
-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Bring back the second of the two killed chunkservers
     pid =
         cluster->StartSingleChunkServer(2, "127.0.0.1:22126", chunkserverConf5);
     LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
     ASSERT_GT(pid, 0);
 
-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
     LOG(INFO) << "current case: HangTwoChunkserverThenResumeTheChunkserver";
 
     /******* HangTwoChunkserverThenResumeTheChunkserver **********/
-    // 1. hang两台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang两台chunkserver:client IO持续hang,新写IO和覆盖写都hang
-    //    c. 恢复其中的一台chunkserver:client IO 恢复读写,
-    //       从恢复chunkserver到client IO恢复时间在election_timeout*2
-    //    d. 恢复另外一台hang的chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang two chunkservers, then recover the hung chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state: client read and write
+    //       requests can be issued normally.
+    //    b. Hang two chunkservers: client IO keeps hanging, for both new
+    //       writes and overwrites.
+    //    c. Recover one of the hung chunkservers: client IO resumes reads and
+    //       writes; the time from recovering the chunkserver to client IO
+    //       recovery is within election_timeout * 2.
+    //    d. Recover the other hung chunkserver: no impact on client IO.
+    // 1. Initial state of the cluster, with I/O issued normally.
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
-    // 2. hang掉两个个chunkserver
+    // 2. Hang two chunkservers
     ASSERT_EQ(0, cluster->HangChunkServer(1));
     ASSERT_EQ(0, cluster->HangChunkServer(2));
 
-    // 3. hang两个chunkserver, io无法正常下发
+    // 3. With two chunkservers hung, IO cannot be issued normally
     ASSERT_FALSE(MonitorResume(0, 4096, 2));
 
-    // 4. 拉起刚才被hang的chunkserver的第一个
+    // 4. Recover the first of the two hung chunkservers
     ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));
 
-    // 5. 拉起刚才被hang的chunkserver的第一个,
-    //    client的io预期最多会在2*electtime后恢复
-    //    如果配置了慢启动,则需要等待
+    // 5. After the first hung chunkserver is recovered,
+    //    client IO is expected to recover within at most 2 * election_timeout.
+    //    If slow start is configured, waiting may be required:
     //    (copysetNum / load_concurrency) * election_timeout
     ASSERT_TRUE(MonitorResume(0, 4096, 80));
 
-    // 6. 拉起刚才被hang的chunkserver的第二个
+    // 6. Recover the second of the two hung chunkservers
     ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));
 
-    // 7. 集群io不影响,正常下发
+    // 7. Cluster IO is unaffected and issued normally
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
     LOG(INFO) << "current case: KillThreeChunkserverThenRestartTheChunkserver";
 
     /******** KillThreeChunkserverThenRestartTheChunkserver ******/
-    // 1. 测试重启三个chunkserver
-    // 2.预期:
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. 关闭三台chunkserver:client IO hang
-    //    c. 重启一台chunkserver:client IO hang
-    //    d. 重启第二台chunkserver:client IO hang,
-    //       直到chunkserver完全恢复,IO恢复。
-    //       恢复时间约等于(chunkserver启动回放数据+2*election_timeout)
-    //    e. 重启第三台chunkserver:client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Test restarting three chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state, client read and write
+    //       requests can be issued normally.
+    //    b. Shut down three chunkservers: client IO hangs.
+    //    c. Restart one chunkserver: client IO still hangs.
+    //    d. Restart a second chunkserver: client IO hangs until that
+    //       chunkserver has fully recovered; the recovery time is roughly
+    //       (chunkserver startup data replay + 2 * election_timeout).
+    //    e. Restart the third chunkserver: client IO is unaffected.
+    // 1. Initial state of the cluster, with I/O issued normally.
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
-    // 2. kill掉三个chunkserver
+    // 2. Kill three chunkservers
     ASSERT_EQ(0, cluster->StopChunkServer(1));
     ASSERT_EQ(0, cluster->StopChunkServer(2));
     ASSERT_EQ(0, cluster->StopChunkServer(3));
 
-    // 3. kill掉三个chunkserver, io无法正常下发
+    // 3. With three chunkservers killed, IO cannot be issued normally
     ASSERT_FALSE(MonitorResume(0, 4096, 2));
 
-    // 4. 拉起刚才被kill的chunkserver的第一个
+    // 4. Bring back the first of the killed chunkservers
     pid =
         cluster->StartSingleChunkServer(1, "127.0.0.1:22125", chunkserverConf4);
     LOG(INFO) << "chunkserver 1 started on 127.0.0.1:22125, pid = " << pid;
     ASSERT_GT(pid, 0);
 
-    // 5. 只有一个chunkserver工作, io无法正常下发
+    // 5. With only one chunkserver working, IO cannot be issued normally
     ASSERT_FALSE(MonitorResume(0, 4096, 80));
 
-    // 6. 拉起刚才被kill的chunkserver的第二个
+    // 6. Bring back the second of the killed chunkservers
     pid =
         cluster->StartSingleChunkServer(2, "127.0.0.1:22126", chunkserverConf5);
     LOG(INFO) << "chunkserver 2 started on 127.0.0.1:22126, pid = " << pid;
     ASSERT_GT(pid, 0);
 
-    // 7. client的io恢复
+    // 7. Client IO recovers
     ASSERT_TRUE(MonitorResume(0, 4096, 80));
 
-    // 8. 拉起其他被kil的chunkserver
+    // 8. Bring back the remaining killed chunkserver
     pid =
         cluster->StartSingleChunkServer(3, "127.0.0.1:22127", chunkserverConf6);
     LOG(INFO) << "chunkserver 3 started on 127.0.0.1:22127, pid = " << pid;
@@ -528,37 +546,37 @@ TEST_F(CSModuleException, ChunkserverException) {
 
     LOG(INFO) << "current case: HangThreeChunkserverThenResumeTheChunkserver";
 
     /******** HangThreeChunkserverThenResumeTheChunkserver **********/
-    // 1. hang三台chunkserver,然后恢复hang的chunkserver
-    // 2.预期
-    //    a. 集群状态正常时:client读写请求可以正常下发
-    //    b. hang三台chunkserver:client IO hang
-    //    c. 恢复一台chunkserver:client IO hang
-    //    d. 再恢复一台chunkserver:预期在
-    //       election_timeout*2左右的时间,client IO恢复
-    //    e. 恢复最后一台chunkserver:预期client IO无影响
-    // 1. 集群最初状态,io正常下发
+    // 1. Hang three chunkservers, then recover the hung chunkservers.
+    // 2. Expectations:
+    //    a. When the cluster is in a normal state: client read and write
+    //       requests can be issued normally.
+    //    b. Hang three chunkservers: client IO hangs.
+    //    c. Recover one chunkserver: client IO still hangs.
+    //    d. Recover a second chunkserver: client IO is expected to recover in
+    //       roughly election_timeout * 2.
+    //    e. Recover the last chunkserver: no impact on client IO expected.
+    // 1. Initial state of the cluster, with I/O issued normally.
     ASSERT_TRUE(MonitorResume(0, 4096, 1));
 
-    // 2. hang掉三个chunkserver
+    // 2. Hang three chunkservers
     ASSERT_EQ(0, cluster->HangChunkServer(1));
     ASSERT_EQ(0, cluster->HangChunkServer(2));
     ASSERT_EQ(0, cluster->HangChunkServer(3));
 
-    // 3. hang三个chunkserver, io无法正常下发
+    // 3. With three chunkservers hung, IO cannot be issued normally
     ASSERT_FALSE(MonitorResume(0, 4096, 30));
 
-    // 4. 拉起刚才被hang的chunkserver的第一个
+    // 4. Recover the first of the hung chunkservers
     ASSERT_EQ(0, cluster->RecoverHangChunkServer(1));
 
-    // 5. 只有一个chunkserver工作, io无法正常下发
+    // 5. With only one chunkserver working, IO cannot be issued normally
     ASSERT_FALSE(MonitorResume(0, 4096, 80));
 
-    // 6. 拉起刚才被hang的chunkserver的第二个
+    // 6. Recover the second of the hung chunkservers
     ASSERT_EQ(0, cluster->RecoverHangChunkServer(2));
     ASSERT_EQ(0, cluster->RecoverHangChunkServer(3));
 
-    // 7. client的io预期最多会在2*electtime s内恢复
-    //    如果配置了慢启动,则需要等待
-    //    (copysetNum / load_concurrency) * election_timeout
+    // 7. Client IO is expected to recover within at most 2 * election_timeout
+    //    seconds. If slow start is configured, wait up to
+    //    (copysetNum / load_concurrency) * election_timeout
     ASSERT_TRUE(MonitorResume(0, 4096, 80));
 }
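The timing bounds asserted throughout this test follow a simple budget. A short worked example follows; the input values (election_timeout_ms = 1000, 100 copysets, load_concurrency = 5) are assumptions chosen for illustration, not values read from this test cluster's configuration.

#include <cstdint>
#include <iostream>

int main() {
    const uint64_t electionTimeoutMs = 1000;  // assumed copyset.election_timeout_ms
    const uint64_t copysetNum = 100;          // assumed copysets on the restarted node
    const uint64_t loadConcurrency = 5;       // assumed slow-start batch size

    // The test allows two election timeouts for a new leader to emerge:
    std::cout << "re-election bound: " << 2 * electionTimeoutMs << " ms\n";

    // With slow start, copysets are loaded in batches of loadConcurrency, so
    // the last batch may only begin electing after all earlier batches:
    std::cout << "slow-start bound:  "
              << (copysetNum / loadConcurrency) * electionTimeoutMs << " ms\n";
    return 0;
}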
diff --git a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp
index 7fa055321b..a2d71f4485 100644
--- a/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp
+++ b/test/mds/heartbeat/chunkserver_healthy_checker_test.cpp
@@ -20,24 +20,26 @@
  * Author: lixiaocui
 */
 
-#include
-#include
 #include "src/mds/heartbeat/chunkserver_healthy_checker.h"
+
+#include
+#include
+
 #include "src/mds/topology/topology_item.h"
 #include "test/mds/mock/mock_topology.h"
 
+using ::curve::mds::topology::MockTopology;
+using ::testing::_;
+using ::testing::DoAll;
 using ::testing::Return;
 using ::testing::SetArgPointee;
-using ::testing::DoAll;
-using ::testing::_;
-using ::curve::mds::topology::MockTopology;
 
 using ::curve::mds::topology::ChunkServer;
 using ::curve::mds::topology::ChunkServerStatus;
-using ::curve::mds::topology::OnlineState;
 using ::curve::mds::topology::CopySetKey;
-using ::curve::mds::topology::kTopoErrCodeSuccess;
 using ::curve::mds::topology::kTopoErrCodeInternalError;
+using ::curve::mds::topology::kTopoErrCodeSuccess;
+using ::curve::mds::topology::OnlineState;
 
 namespace curve {
 namespace mds {
@@ -53,7 +55,7 @@ TEST(ChunkserverHealthyChecker, test_checkHeartBeat_interval) {
 
     HeartbeatInfo info;
     {
-        // chunkserver首次更新heartbeatInfo
+        // The chunkservers update their heartbeatInfo for the first time
         checker->UpdateLastReceivedHeartbeatTime(1, steady_clock::now());
         checker->UpdateLastReceivedHeartbeatTime(
             2, steady_clock::now() - std::chrono::milliseconds(4000));
             6, steady_clock::now() - std::chrono::milliseconds(10000));
         checker->UpdateLastReceivedHeartbeatTime(
             7, steady_clock::now() - std::chrono::milliseconds(10000));
-        checker->UpdateLastReceivedHeartbeatTime(
-            8, steady_clock::now());
+        checker->UpdateLastReceivedHeartbeatTime(8, steady_clock::now());
         checker->UpdateLastReceivedHeartbeatTime(
             9, steady_clock::now() - std::chrono::milliseconds(4000));
         checker->UpdateLastReceivedHeartbeatTime(
             10, steady_clock::now() - std::chrono::milliseconds(10000));
@@ -94,30 +95,32 @@
     }
 
     {
-        // chunkserver-1 更新为online
-        // chunkserver-2 心跳miss,保持unstable
-        // chunkserver-3,chunkserver-5,chunkserver-6心跳offline,
-        // chunkserver-3的retired状态会被更新, 从心跳map中移除
-        // chunkserver-5已经是retired状态,无需更新
-        // chunkserver-6 get info失败, 未成功更新状态
-        // chunnkserver-7 update失败, 未成功更新状态
-        // chunkserver-8, pendding && online, 更新为onLine
-        // chunkserver-9, pendding && unstable, 更新为retired
-        // chunkserver-10, pendding && offline, 更新为retired
+        // Chunkserver-1 is updated to online.
+        // Chunkserver-2 misses a heartbeat and stays unstable.
+        // Chunkserver-3, chunkserver-5 and chunkserver-6 are heartbeat
+        // offline: chunkserver-3's status is updated to retired and it is
+        // removed from the heartbeat map; chunkserver-5 is already retired
+        // and needs no update; chunkserver-6 fails on get info, so its
+        // status is not updated.
+        // Chunkserver-7 fails on update, so its status is not updated.
+        // Chunkserver-8 is pending && online, updated to online.
+        // Chunkserver-9 is pending && unstable, updated to retired.
+        // Chunkserver-10 is pending && offline, updated to retired.
         EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _))
-            .Times(7).WillRepeatedly(Return(kTopoErrCodeSuccess));
-        ChunkServer cs2(2, "", "", 1, "", 0, "",
-                        ChunkServerStatus::READWRITE, OnlineState::UNSTABLE);
-        ChunkServer cs3(3, "", "", 1, "", 0, "",
-                        ChunkServerStatus::READWRITE, OnlineState::UNSTABLE);
-        ChunkServer cs5(5, "", "", 1, "", 0, "",
-                        ChunkServerStatus::RETIRED, OnlineState::UNSTABLE);
-        ChunkServer cs7(7, "", "", 1, "", 0, "",
-                        ChunkServerStatus::READWRITE, OnlineState::UNSTABLE);
-        ChunkServer cs9(9, "", "", 1, "", 0, "",
-                        ChunkServerStatus::PENDDING, OnlineState::UNSTABLE);
-        ChunkServer cs10(10, "", "", 1, "", 0, "",
-                         ChunkServerStatus::PENDDING, OnlineState::UNSTABLE);
+            .Times(7)
+            .WillRepeatedly(Return(kTopoErrCodeSuccess));
+        ChunkServer cs2(2, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE,
+                        OnlineState::UNSTABLE);
+        ChunkServer cs3(3, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE,
+                        OnlineState::UNSTABLE);
+        ChunkServer cs5(5, "", "", 1, "", 0, "", ChunkServerStatus::RETIRED,
+                        OnlineState::UNSTABLE);
+        ChunkServer cs7(7, "", "", 1, "", 0, "", ChunkServerStatus::READWRITE,
+                        OnlineState::UNSTABLE);
+        ChunkServer cs9(9, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING,
+                        OnlineState::UNSTABLE);
+        ChunkServer cs10(10, "", "", 1, "", 0, "", ChunkServerStatus::PENDDING,
+                         OnlineState::UNSTABLE);
         EXPECT_CALL(*topology, GetChunkServer(2, _))
             .WillOnce(DoAll(SetArgPointee<1>(cs2), Return(true)));
         EXPECT_CALL(*topology, GetChunkServer(3, _))
             .WillOnce(DoAll(SetArgPointee<1>(cs3), Return(true)));
             .WillOnce(Return(std::vector{}));
         EXPECT_CALL(*topology, GetChunkServer(5, _))
             .WillOnce(DoAll(SetArgPointee<1>(cs5), Return(true)));
-        EXPECT_CALL(*topology, GetChunkServer(6, _))
-            .WillOnce(Return(false));
+        EXPECT_CALL(*topology, GetChunkServer(6, _)).WillOnce(Return(false));
         EXPECT_CALL(*topology, GetChunkServer(7, _))
             .WillOnce(DoAll(SetArgPointee<1>(cs7), Return(true)));
         EXPECT_CALL(*topology, GetChunkServer(9, _))
@@ -164,15 +166,13 @@
     }
 
     {
-        // chunkserver 2, 6 ,7 收到心跳
-        checker->UpdateLastReceivedHeartbeatTime(
-            2, steady_clock::now());
-        checker->UpdateLastReceivedHeartbeatTime(
-            6, steady_clock::now());
-        checker->UpdateLastReceivedHeartbeatTime(
-            7, steady_clock::now());
+        // Chunkservers 2, 6 and 7 receive heartbeats
+        checker->UpdateLastReceivedHeartbeatTime(2, steady_clock::now());
+        checker->UpdateLastReceivedHeartbeatTime(6, steady_clock::now());
+        checker->UpdateLastReceivedHeartbeatTime(7, steady_clock::now());
         EXPECT_CALL(*topology, UpdateChunkServerOnlineState(_, _))
-            .Times(3).WillRepeatedly(Return(kTopoErrCodeSuccess));
+            .Times(3)
+            .WillRepeatedly(Return(kTopoErrCodeSuccess));
 
         checker->CheckHeartBeatInterval();
         ASSERT_TRUE(checker->GetHeartBeatInfo(2, &info));
         ASSERT_EQ(OnlineState::ONLINE, info.state);
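The classification CheckHeartBeatInterval applies in these cases can be summarized as a simple gap-based state machine. Below is a hedged sketch: the two thresholds (2s for a miss, 8s for offline) are assumptions chosen so the test's 4000ms and 10000ms offsets land in UNSTABLE and OFFLINE respectively; the real values come from the checker's heartbeat options.

#include <chrono>

enum class State { ONLINE, UNSTABLE, OFFLINE };

// Classify a chunkserver by how long ago its last heartbeat arrived.
State Classify(std::chrono::steady_clock::time_point lastHeartbeat,
               std::chrono::steady_clock::time_point now) {
    const auto gap = now - lastHeartbeat;
    if (gap < std::chrono::milliseconds(2000)) return State::ONLINE;
    if (gap < std::chrono::milliseconds(8000)) return State::UNSTABLE;
    return State::OFFLINE;  // offline servers become candidates for RETIRED
}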
diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp
index 11c70f8572..2a388c8944 100644
--- a/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp
+++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_helper_test.cpp
@@ -20,23 +20,26 @@
  * Author: lixiaocui
 */
 
+#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h"
+
 #include
+
 #include
-#include "src/mds/nameserver2/helper/namespace_helper.h"
-#include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h"
+
 #include "src/common/namespace_define.h"
+#include "src/mds/nameserver2/helper/namespace_helper.h"
 #include "test/mds/mock/mock_etcdclient.h"
 
 using ::testing::_;
-using ::testing::Return;
-using ::testing::SetArgPointee;
 using ::testing::DoAll;
 using ::testing::Matcher;
+using ::testing::Return;
+using ::testing::SetArgPointee;
 
-using ::curve::common::SEGMENTALLOCSIZEKEYEND;
 using ::curve::common::SEGMENTALLOCSIZEKEY;
-using ::curve::common::SEGMENTINFOKEYPREFIX;
+using ::curve::common::SEGMENTALLOCSIZEKEYEND;
 using ::curve::common::SEGMENTINFOKEYEND;
+using ::curve::common::SEGMENTINFOKEYPREFIX;
 
 namespace curve {
 namespace mds {
@@ -44,18 +47,18 @@ TEST(TestAllocStatisticHelper, test_GetExistSegmentAllocValues) {
     auto mockEtcdClient = std::make_shared();
 
     {
-        // 1. list失败
+        // 1. List failed
         EXPECT_CALL(*mockEtcdClient,
                     List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND,
                          Matcher*>(_)))
             .WillOnce(Return(EtcdErrCode::EtcdCanceled));
         std::map out;
         ASSERT_EQ(-1, AllocStatisticHelper::GetExistSegmentAllocValues(
-            &out, mockEtcdClient));
+                          &out, mockEtcdClient));
     }
     {
-        // 2. list成功,解析失败
+        // 2. List succeeded, but parsing failed
         std::vector values{"hello"};
         EXPECT_CALL(*mockEtcdClient,
                     List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND,
                          Matcher*>(_)))
             .WillOnce(
                 DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK)));
         std::map out;
         ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues(
-            &out, mockEtcdClient));
+                         &out, mockEtcdClient));
     }
     {
-        // 3. 获取已有的segment alloc value成功
+        // 3. Successfully obtained the existing segment alloc values
         std::vector values{
             NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)};
         EXPECT_CALL(*mockEtcdClient,
                     List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND,
                          Matcher*>(_)))
             .WillOnce(
                 DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK)));
         std::map out;
         ASSERT_EQ(0, AllocStatisticHelper::GetExistSegmentAllocValues(
-            &out, mockEtcdClient));
+                         &out, mockEtcdClient));
         ASSERT_EQ(1, out.size());
         ASSERT_EQ(1024, out[1]);
     }
 }
 
TEST(TestAllocStatisticHelper, test_CalculateSegmentAlloc) {
     auto mockEtcdClient = std::make_shared();
     {
         // 1. CalculateSegmentAlloc ok
         LOG(INFO) << "start test1......";
         EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision(
-            SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
+                                         SEGMENTINFOKEYPREFIX,
+                                         SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
             .WillOnce(Return(EtcdErrCode::EtcdUnknown));
         std::map out;
         ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc(
-            2, mockEtcdClient, &out));
+                          2, mockEtcdClient, &out));
     }
     {
-        // 2. ListWithLimitAndRevision成功,但是解析失败
+        // 2. ListWithLimitAndRevision succeeded, but parsing failed
         LOG(INFO) << "start test2......";
         std::vector values{"hello"};
         std::string lastKey = "021";
         EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision(
-            SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
+                                         SEGMENTINFOKEYPREFIX,
+                                         SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
             .WillOnce(
                 DoAll(SetArgPointee<4>(values), Return(EtcdErrCode::EtcdOK)));
         std::map out;
         ASSERT_EQ(-1, AllocStatisticHelper::CalculateSegmentAlloc(
-            2, mockEtcdClient, &out));
+                          2, mockEtcdClient, &out));
     }
     {
-        // 3. ListWithLimitAndRevision成功, 解析成功, bundle=1000, 获取个数为1
+        // 3. ListWithLimitAndRevision succeeded and parsing succeeded, with
+        //    bundle=1000 and a single entry retrieved
         LOG(INFO) << "start test3......";
         PageFileSegment segment;
         segment.set_segmentsize(1 << 30);
         segment.set_logicalpoolid(1);
-        segment.set_chunksize(16*1024*1024);
+        segment.set_chunksize(16 * 1024 * 1024);
         segment.set_startoffset(0);
         std::string encodeSegment;
         ASSERT_TRUE(
             NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment));
         std::string lastKey =
             NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 0);
         EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision(
-            SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
+                                         SEGMENTINFOKEYPREFIX,
+                                         SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
             .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey),
-                Return(EtcdErrCode::EtcdOK)));
+                            Return(EtcdErrCode::EtcdOK)));
         std::map out;
         ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc(
-            2, mockEtcdClient, &out));
+                         2, mockEtcdClient, &out));
         ASSERT_EQ(1, out.size());
         ASSERT_EQ(1 << 30, out[1]);
     }
     {
-        // 4. ListWithLimitAndRevision成功, 解析成功
-        //    bundle=1000, 获取个数为1001
+        // 4. ListWithLimitAndRevision succeeded and parsing succeeded, with
+        //    bundle=1000 and 1001 entries retrieved
         LOG(INFO) << "start test4......";
         PageFileSegment segment;
         segment.set_segmentsize(1 << 30);
         segment.set_logicalpoolid(1);
-        segment.set_chunksize(16*1024*1024);
+        segment.set_chunksize(16 * 1024 * 1024);
         segment.set_startoffset(0);
         std::string encodeSegment;
         std::vector values;
         std::string lastKey1 =
             NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500);
         std::string lastKey2 =
             NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000);
         EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision(
-            SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
+                                         SEGMENTINFOKEYPREFIX,
+                                         SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
             .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey1),
                             Return(EtcdErrCode::EtcdOK)));
-        EXPECT_CALL(*mockEtcdClient, ListWithLimitAndRevision(
-            lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
-            .WillOnce(DoAll(SetArgPointee<4>(
-                std::vector{encodeSegment, encodeSegment}),
+        EXPECT_CALL(*mockEtcdClient,
+                    ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND,
+                                             GETBUNDLE, 2, _, _))
+            .WillOnce(DoAll(SetArgPointee<4>(std::vector{
+                                encodeSegment, encodeSegment}),
                             SetArgPointee<5>(lastKey2),
                             Return(EtcdErrCode::EtcdOK)));
         std::map out;
         ASSERT_EQ(0, AllocStatisticHelper::CalculateSegmentAlloc(
-            2, mockEtcdClient, &out));
+                         2, mockEtcdClient, &out));
         ASSERT_EQ(2, out.size());
         ASSERT_EQ(500L * (1 << 30), out[1]);
         ASSERT_EQ(501L * (1 << 30), out[2]);
     }
@@ -181,5 +190,3 @@
 }
 }  // namespace mds
 }  // namespace curve
-
-
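Test cases 3 and 4 above exercise a pagination pattern: list at most GETBUNDLE (1000, per the comments) keys per call at a pinned revision, then resume from the last key returned. The sketch below illustrates that loop under assumptions; ListPage is a stand-in callback, not the real ListWithLimitAndRevision signature.

#include <functional>
#include <string>
#include <vector>

using ListPage = std::function<bool(const std::string& startKey,
                                    std::vector<std::string>* values,
                                    std::string* lastKey)>;

// Count segment records by paging through the key range `bundle` at a time.
int CountSegments(const std::string& prefix, int bundle, ListPage listPage) {
    int total = 0;
    std::string start = prefix;
    bool firstPage = true;
    while (true) {
        std::vector<std::string> values;
        std::string lastKey;
        if (!listPage(start, &values, &lastKey)) return -1;  // list failed
        // Later pages start at the previous lastKey, so their first element
        // was already counted on the page before; skip it.
        total += static_cast<int>(values.size()) - (firstPage ? 0 : 1);
        if (static_cast<int>(values.size()) < bundle) break;  // final page
        start = lastKey;
        firstPage = false;
    }
    return total;  // e.g. 1000 + (2 - 1) = 1001 in test case 4
}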
diff --git a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp
index c51e91587c..b260bb502e 100644
--- a/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp
+++ b/test/mds/nameserver2/allocstatistic/alloc_statistic_test.cpp
@@ -20,21 +20,23 @@
  * Author: lixiaocui
 */
 
+#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h"
+
 #include
-#include "src/mds/nameserver2/helper/namespace_helper.h"
+
+#include "src/common/namespace_define.h"
 #include "src/mds/nameserver2/allocstatistic/alloc_statistic_helper.h"
+#include "src/mds/nameserver2/helper/namespace_helper.h"
 #include "test/mds/mock/mock_etcdclient.h"
-#include "src/mds/nameserver2/allocstatistic/alloc_statistic.h"
-#include "src/common/namespace_define.h"
 
 using ::testing::_;
-using ::testing::Return;
-using ::testing::SetArgPointee;
 using ::testing::DoAll;
 using ::testing::Matcher;
+using ::testing::Return;
+using ::testing::SetArgPointee;
 
-using ::curve::common::SEGMENTALLOCSIZEKEYEND;
 using ::curve::common::SEGMENTALLOCSIZEKEY;
+using ::curve::common::SEGMENTALLOCSIZEKEYEND;
 using ::curve::common::SEGMENTINFOKEYEND;
 using ::curve::common::SEGMENTINFOKEYPREFIX;
 
@@ -60,17 +62,18 @@ class AllocStatisticTest : public ::testing::Test {
 
TEST_F(AllocStatisticTest, test_Init) {
     {
-        // 1. 从etcd中获取当前revision失败
+        // 1. Failed to obtain the current revision from etcd
         LOG(INFO) << "test1......";
-        EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).
-            WillOnce(Return(EtcdErrCode::EtcdCanceled));
+        EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_))
+            .WillOnce(Return(EtcdErrCode::EtcdCanceled));
         ASSERT_EQ(-1, allocStatistic_->Init());
     }
     {
-        // 2. 获取已经存在的logicalPool对应的alloc大小失败
+        // 2. Failed to obtain the alloc size corresponding to the existing
+        //    logicalPool
         LOG(INFO) << "test2......";
-        EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).
-            WillOnce(Return(EtcdErrCode::EtcdOK));
+        EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_))
+            .WillOnce(Return(EtcdErrCode::EtcdOK));
         EXPECT_CALL(*mockEtcdClient_,
                     List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND,
                          Matcher*>(_)))
             .WillOnce(Return(EtcdErrCode::EtcdCanceled));
         int64_t alloc;
         ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(1, &alloc));
     }
     {
-        // 3. init成功
+        // 3. Init succeeded
         LOG(INFO) << "test3......";
         std::vector values{
             NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)};
-        EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_)).
-            WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK)));
+        EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_))
+            .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK)));
         EXPECT_CALL(*mockEtcdClient_,
                     List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND,
                          Matcher*>(_)))
@@ -99,10 +102,10 @@
 }
 
TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) {
-    // 初始化 allocStatistic
-    // 旧值: logicalPooId(1):1024
+    // Initialize allocStatistic
+    // Old value: logicalPoolId(1):1024
     std::vector values{
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)};
+        NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 1024)};
     EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_))
         .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK)));
     EXPECT_CALL(*mockEtcdClient_,
                 List(SEGMENTALLOCSIZEKEY, SEGMENTALLOCSIZEKEYEND,
                      Matcher*>(_)))
         .WillOnce(DoAll(SetArgPointee<2>(values), Return(EtcdErrCode::EtcdOK)));
 
     PageFileSegment segment;
     segment.set_segmentsize(1 << 30);
     segment.set_logicalpoolid(1);
-    segment.set_chunksize(16*1024*1024);
+    segment.set_chunksize(16 * 1024 * 1024);
     segment.set_startoffset(0);
     std::string encodeSegment;
     values.clear();
-    ASSERT_TRUE(
-        NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment));
+    ASSERT_TRUE(NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment));
     for (int i = 1; i <= 500; i++) {
         values.emplace_back(encodeSegment);
     }
 
-    // 1. 在定期持久化线程和统计线程启动前,只能获取旧值
+    // 1. Before the periodic persistence thread and the statistics thread
+    //    start, only the old values can be obtained
     int64_t alloc;
     ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc));
     ASSERT_EQ(1024, alloc);
     ASSERT_FALSE(allocStatistic_->GetAllocByLogicalPool(2, &alloc));
 
-    // 2. 更新segment的值
+    // 2. Update the segment values
     allocStatistic_->DeAllocSpace(1, 64, 1);
     allocStatistic_->AllocSpace(1, 32, 1);
     ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc));
     ASSERT_EQ(1024 - 32, alloc);
 
-    // 设置mock的etcd中segment的值
+    // Set the segment values in the mock etcd
     // logicalPoolId(1):500 * (1<<30)
     // logicalPoolId(2):501 * (1<<30)
     segment.set_logicalpoolid(2);
-    ASSERT_TRUE(
-        NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment));
+    ASSERT_TRUE(NameSpaceStorageCodec::EncodeSegment(segment, &encodeSegment));
     for (int i = 501; i <= 1000; i++) {
         values.emplace_back(encodeSegment);
     }
-    std::string lastKey1 =
-        NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500);
+    std::string lastKey1 = NameSpaceStorageCodec::EncodeSegmentStoreKey(1, 500);
     std::string lastKey2 =
         NameSpaceStorageCodec::EncodeSegmentStoreKey(501, 1000);
-    EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision(
-        SEGMENTINFOKEYPREFIX, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
+    EXPECT_CALL(*mockEtcdClient_,
+                ListWithLimitAndRevision(SEGMENTINFOKEYPREFIX,
+                                         SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
         .Times(2)
         .WillOnce(Return(EtcdErrCode::EtcdCanceled))
-        .WillOnce(DoAll(SetArgPointee<4>(values),
-                        SetArgPointee<5>(lastKey1),
+        .WillOnce(DoAll(SetArgPointee<4>(values), SetArgPointee<5>(lastKey1),
                         Return(EtcdErrCode::EtcdOK)));
-    EXPECT_CALL(*mockEtcdClient_, ListWithLimitAndRevision(
-        lastKey1, SEGMENTINFOKEYEND, GETBUNDLE, 2, _, _))
-        .WillOnce(DoAll(SetArgPointee<4>(
-            std::vector{encodeSegment, encodeSegment}),
+    EXPECT_CALL(*mockEtcdClient_,
+                ListWithLimitAndRevision(lastKey1, SEGMENTINFOKEYEND, GETBUNDLE,
+                                         2, _, _))
+        .WillOnce(DoAll(SetArgPointee<4>(std::vector{
+                            encodeSegment, encodeSegment}),
                         SetArgPointee<5>(lastKey2),
                         Return(EtcdErrCode::EtcdOK)));
-    EXPECT_CALL(*mockEtcdClient_,
-                GetCurrentRevision(_))
+    EXPECT_CALL(*mockEtcdClient_, GetCurrentRevision(_))
         .Times(2)
         .WillOnce(Return(EtcdErrCode::EtcdCanceled))
         .WillOnce(DoAll(SetArgPointee<0>(2), Return(EtcdErrCode::EtcdOK)));
 
-    // 设置mock的Put结果
+    // Set the Put results of the mock
-    EXPECT_CALL(*mockEtcdClient_, Put(
-        NameSpaceStorageCodec::EncodeSegmentAllocKey(1),
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(
-            1, 1024 - 32 + (1L << 30))))
+    EXPECT_CALL(*mockEtcdClient_,
+                Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(1),
+                    NameSpaceStorageCodec::EncodeSegmentAllocValue(
+                        1, 1024 - 32 + (1L << 30))))
         .WillOnce(Return(EtcdErrCode::EtcdOK));
-    EXPECT_CALL(*mockEtcdClient_, Put(
-        NameSpaceStorageCodec::EncodeSegmentAllocKey(2),
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 1L << 30)))
+    EXPECT_CALL(
+        *mockEtcdClient_,
+        Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(2),
+            NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 1L << 30)))
         .WillOnce(Return(EtcdErrCode::EtcdOK));
-    EXPECT_CALL(*mockEtcdClient_, Put(
-        NameSpaceStorageCodec::EncodeSegmentAllocKey(1),
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 501L *(1 << 30))))
+    EXPECT_CALL(*mockEtcdClient_,
+                Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(1),
+                    NameSpaceStorageCodec::EncodeSegmentAllocValue(
+                        1, 501L * (1 << 30))))
         .WillOnce(Return(EtcdErrCode::EtcdOK));
-    EXPECT_CALL(*mockEtcdClient_, Put(
-        NameSpaceStorageCodec::EncodeSegmentAllocKey(2),
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 502L *(1 << 30))))
+    EXPECT_CALL(*mockEtcdClient_,
+                Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(2),
+                    NameSpaceStorageCodec::EncodeSegmentAllocValue(
+                        2, 502L * (1 << 30))))
         .WillOnce(Return(EtcdErrCode::EtcdOK));
-    EXPECT_CALL(*mockEtcdClient_, Put(
-        NameSpaceStorageCodec::EncodeSegmentAllocKey(1),
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(1, 500L *(1 << 30))))
+    EXPECT_CALL(*mockEtcdClient_,
+                Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(1),
+                    NameSpaceStorageCodec::EncodeSegmentAllocValue(
+                        1, 500L * (1 << 30))))
         .WillOnce(Return(EtcdErrCode::EtcdOK));
-    EXPECT_CALL(*mockEtcdClient_, Put(
-        NameSpaceStorageCodec::EncodeSegmentAllocKey(2),
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(2, 501L *(1 << 30))))
+    EXPECT_CALL(*mockEtcdClient_,
+                Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(2),
+                    NameSpaceStorageCodec::EncodeSegmentAllocValue(
+                        2, 501L * (1 << 30))))
         .WillOnce(Return(EtcdErrCode::EtcdOK));
-    EXPECT_CALL(*mockEtcdClient_, Put(
-        NameSpaceStorageCodec::EncodeSegmentAllocKey(3),
-        NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30)))
+    EXPECT_CALL(
+        *mockEtcdClient_,
+        Put(NameSpaceStorageCodec::EncodeSegmentAllocKey(3),
+            NameSpaceStorageCodec::EncodeSegmentAllocValue(3, 1L << 30)))
         .WillOnce(Return(EtcdErrCode::EtcdOK));
 
-    // 2. 启动定期持久化线程和统计线程
+    // 2. Start the periodic persistence thread and the statistics thread
     for (int i = 1; i <= 2; i++) {
         allocStatistic_->AllocSpace(i, 1L << 30, i + 3);
     }
@@ -206,21 +214,21 @@ TEST_F(AllocStatisticTest, test_PeriodicPersist_CalculateSegmentAlloc) {
 
     std::this_thread::sleep_for(std::chrono::seconds(6));
     ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc));
-    ASSERT_EQ(501L *(1 << 30), alloc);
+    ASSERT_EQ(501L * (1 << 30), alloc);
     ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc));
-    ASSERT_EQ(502L *(1 << 30), alloc);
+    ASSERT_EQ(502L * (1 << 30), alloc);
     std::this_thread::sleep_for(std::chrono::milliseconds(30));
 
-    // 再通过alloc进行更新
+    // Update through alloc again
     for (int i = 1; i <= 2; i++) {
         allocStatistic_->DeAllocSpace(i, 1L << 30, i + 4);
     }
     allocStatistic_->AllocSpace(3, 1L << 30, 10);
 
     ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(1, &alloc));
-    ASSERT_EQ(500L *(1 << 30), alloc);
+    ASSERT_EQ(500L * (1 << 30), alloc);
     ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(2, &alloc));
-    ASSERT_EQ(501L *(1 << 30), alloc);
+    ASSERT_EQ(501L * (1 << 30), alloc);
     ASSERT_TRUE(allocStatistic_->GetAllocByLogicalPool(3, &alloc));
     ASSERT_EQ(1L << 30, alloc);
     std::this_thread::sleep_for(std::chrono::milliseconds(30));
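The arithmetic this test asserts comes from keeping a recalculated base value per logical pool plus a running delta fed by AllocSpace/DeAllocSpace. A hedged sketch of that bookkeeping, with illustrative field names rather than Curve's internals:

#include <cstdint>

struct PoolAlloc {
    int64_t base = 0;   // last recalculated (or persisted) value
    int64_t delta = 0;  // net alloc/dealloc applied since that value
    int64_t Current() const { return base + delta; }
};

int main() {
    PoolAlloc pool1;
    pool1.base = 1024;  // old persisted value for logical pool 1
    pool1.delta -= 64;  // DeAllocSpace(1, 64, ...)
    pool1.delta += 32;  // AllocSpace(1, 32, ...)
    // Matches the expectation asserted before the rescan: 1024 - 32
    return pool1.Current() == 1024 - 32 ? 0 : 1;
}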
diff --git a/test/tools/chunkserver_client_test.cpp b/test/tools/chunkserver_client_test.cpp
index 9af94d01d3..b88d1fab08 100644
--- a/test/tools/chunkserver_client_test.cpp
+++ b/test/tools/chunkserver_client_test.cpp
@@ -20,14 +20,15 @@
  * Author: charisu
 */
 
-#include
 #include "src/tools/chunkserver_client.h"
-#include "test/client/fake/mockMDS.h"
+
+#include
+
 #include "test/client/fake/fakeMDS.h"
+#include "test/client/fake/mockMDS.h"
 
-using curve::chunkserver::GetChunkInfoResponse;
 using curve::chunkserver::CHUNK_OP_STATUS;
-
+using curve::chunkserver::GetChunkInfoResponse;
 DECLARE_string(chunkserver_list);
 
 namespace brpc {
@@ -46,9 +47,7 @@ class ChunkServerClientTest : public ::testing::Test {
         fakemds.Initialize();
         fakemds.CreateFakeChunkservers(false);
     }
-    void TearDown() {
-        fakemds.UnInitialize();
-    }
+    void TearDown() { fakemds.UnInitialize(); }
     ChunkServerClient client;
     FakeMDS fakemds;
 };
@@ -59,37 +58,36 @@ TEST_F(ChunkServerClientTest, Init) {
 }
 
TEST_F(ChunkServerClientTest, GetRaftStatus) {
-    std::vector statServices =
-        fakemds.GetRaftStateService();
-    // 正常情况
+    std::vector statServices =
+        fakemds.GetRaftStateService();
+    // Normal case
     butil::IOBuf iobuf;
     iobuf.append("test");
     statServices[0]->SetBuf(iobuf);
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     ASSERT_EQ(0, client.GetRaftStatus(&iobuf));
 
-    // 传入空指针
+    // Pass in a null pointer
     ASSERT_EQ(-1, client.GetRaftStatus(nullptr));
 
-    // RPC失败的情况
+    // RPC failure case
     statServices[0]->SetFailed(true);
     ASSERT_EQ(-1, client.GetRaftStatus(&iobuf));
 }
 
TEST_F(ChunkServerClientTest, CheckChunkServerOnline) {
-    std::vector chunkServices = fakemds.GetChunkservice();
+    std::vector chunkServices = fakemds.GetChunkservice();
     brpc::Controller cntl;
-    std::unique_ptr response(
-        new GetChunkInfoResponse());
+    std::unique_ptr response(new GetChunkInfoResponse());
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
     std::unique_ptr fakeret(
         new FakeReturn(&cntl, static_cast(response.get())));
     chunkServices[0]->SetGetChunkInfo(fakeret.get());
 
-    // 正常情况
+    // Normal case
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     ASSERT_EQ(true, client.CheckChunkServerOnline());
 
-    // RPC失败的情况
+    // RPC failure case
     cntl.SetFailed("fail for test");
     ASSERT_EQ(false, client.CheckChunkServerOnline());
 }
 
@@ -98,23 +96,23 @@ TEST_F(ChunkServerClientTest, GetCopysetStatus2) {
     auto copysetServices = fakemds.GetCreateCopysetService();
     CopysetStatusRequest request;
     CopysetStatusResponse response;
-    curve::common::Peer *peer = new curve::common::Peer();
+    curve::common::Peer* peer = new curve::common::Peer();
     peer->set_address("127.0.0.1:9191");
     request.set_logicpoolid(1);
     request.set_copysetid(1001);
     request.set_allocated_peer(peer);
     request.set_queryhash(true);
 
-    // 正常情况
+    // Normal case
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     ASSERT_EQ(0, client.GetCopysetStatus(request, &response));
 
-    // 返回码不ok的情况
+    // Return code is not OK
     copysetServices[0]->SetStatus(
         COPYSET_OP_STATUS::COPYSET_OP_STATUS_COPYSET_NOTEXIST);
     ASSERT_EQ(-1, client.GetCopysetStatus(request, &response));
 
-    // RPC失败的情况
+    // RPC failure case
     brpc::Controller cntl;
     std::unique_ptr fakeret(new FakeReturn(&cntl, nullptr));
     copysetServices[0]->SetFakeReturn(fakeret.get());
@@ -122,27 +120,26 @@
 }
 
TEST_F(ChunkServerClientTest, GetChunkHash) {
-    std::vector chunkServices = fakemds.GetChunkservice();
+    std::vector chunkServices = fakemds.GetChunkservice();
     brpc::Controller cntl;
-    std::unique_ptr response(
-        new GetChunkHashResponse());
+    std::unique_ptr response(new GetChunkHashResponse());
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_SUCCESS);
     response->set_hash("1234");
     std::unique_ptr fakeret(
         new FakeReturn(&cntl, static_cast(response.get())));
     chunkServices[0]->SetGetChunkHash(fakeret.get());
     Chunk chunk(1, 100, 1001);
 
-    // 正常情况
+    // Normal case
     ASSERT_EQ(0, client.Init("127.0.0.1:9191"));
     std::string hash;
     ASSERT_EQ(0, client.GetChunkHash(chunk, &hash));
     ASSERT_EQ("1234", hash);
 
-    // RPC失败的情况
+    // RPC failure case
     cntl.SetFailed("fail for test");
     ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash));
 
-    // 返回码不为ok
+    // Return code is not OK
     response->set_status(CHUNK_OP_STATUS::CHUNK_OP_STATUS_FAILURE_UNKNOWN);
     ASSERT_EQ(-1, client.GetChunkHash(chunk, &hash));
 }
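The client tests above repeatedly distinguish three outcomes: a successful RPC with an OK status, an RPC-level failure, and a successful RPC carrying a non-OK status. A minimal sketch of that decision order, using stand-in types rather than the real brpc/Curve interfaces:

#include <string>

enum class OpStatus { SUCCESS, FAILURE_UNKNOWN };
struct RpcResult {
    bool rpcFailed = false;  // models cntl.Failed()
    OpStatus status = OpStatus::SUCCESS;
    std::string hash;
};

// Returns 0 on success, -1 otherwise, mirroring the tested return codes.
int GetChunkHashLike(const RpcResult& r, std::string* hash) {
    if (r.rpcFailed) return -1;                    // "fail for test" path
    if (r.status != OpStatus::SUCCESS) return -1;  // non-OK return code path
    *hash = r.hash;                                // normal path
    return 0;
}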