diff --git a/curvefs_python/test.py b/curvefs_python/test.py index 0f0045fa62..eb77fd7f9e 100644 --- a/curvefs_python/test.py +++ b/curvefs_python/test.py @@ -19,11 +19,12 @@ import os + def exec_cmd(cmd): ret = os.system(cmd) if ret == 0: print cmd + " exec success" - else : + else: print cmd + " exec fail, ret = " + str(ret) @@ -37,10 +38,10 @@ def exec_cmd(cmd): exec_cmd(cmd) cmd = "curve list --user k8s --dirname /k8s" exec_cmd(cmd) -# 不是root,失败 +# Not root, failed cmd = "curve list --user k8s --dirname /" exec_cmd(cmd) -# root没有传入密码,失败 +# Root did not pass in password, failed cmd = "curve list --user root --dirname /" exec_cmd(cmd) cmd = "curve list --user root --dirname / --password root_password" diff --git a/nebd/src/common/timeutility.h b/nebd/src/common/timeutility.h index a80afb61b5..9e454f15a7 100644 --- a/nebd/src/common/timeutility.h +++ b/nebd/src/common/timeutility.h @@ -26,9 +26,10 @@ #include #include #include + +#include #include #include -#include namespace nebd { namespace common { @@ -53,7 +54,8 @@ class TimeUtility { return tm.tv_sec; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with + // the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -64,7 +66,7 @@ class TimeUtility { } }; -} // namespace common -} // namespace nebd +} // namespace common +} // namespace nebd -#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ +#endif // NEBD_SRC_COMMON_TIMEUTILITY_H_ diff --git a/nebd/src/part2/util.h b/nebd/src/part2/util.h index f733a04577..0894d69ebe 100644 --- a/nebd/src/part2/util.h +++ b/nebd/src/part2/util.h @@ -23,9 +23,9 @@ #ifndef NEBD_SRC_PART2_UTIL_H_ #define NEBD_SRC_PART2_UTIL_H_ -#include #include // NOLINT #include +#include #include "nebd/src/part2/define.h" @@ -51,9 +51,9 @@ class FdAllocator { FdAllocator() : fd_(0) {} ~FdAllocator() {} - // fd的有效值范围为[1, INT_MAX] + // The valid range of values for fd is 
[1, INT_MAX] int GetNext(); - // 初始化fd的值 + // Initialize the value of fd void InitFd(int fd); private: diff --git a/nebd/test/common/test_name_lock.cpp b/nebd/test/common/test_name_lock.cpp index 1f79ec5800..574667ad8b 100644 --- a/nebd/test/common/test_name_lock.cpp +++ b/nebd/test/common/test_name_lock.cpp @@ -21,6 +21,7 @@ */ #include + #include #include // NOLINT @@ -32,29 +33,27 @@ namespace common { TEST(TestNameLock, TestNameLockBasic) { NameLock lock1, lock2, lock3; - // lock测试 + // Lock test lock1.Lock("str1"); - // 同锁不同str可lock不死锁 + // Same lock but different strs can lock without deadlock lock1.Lock("str2"); - // 不同锁同str可lock不死锁 + // Different locks with the same str can lock without deadlock lock2.Lock("str1"); - - - // 同锁同str TryLock失败 + // Same lock with str TryLock failed ASSERT_FALSE(lock1.TryLock("str1")); - // 同锁不同str TryLock成功 + // Same lock different str TryLock successful ASSERT_TRUE(lock1.TryLock("str3")); - // 不同锁同str TryLock成功 + // Different locks with str TryLock succeeded ASSERT_TRUE(lock3.TryLock("str1")); - // unlock测试 + // Unlock test lock1.Unlock("str1"); lock1.Unlock("str2"); lock1.Unlock("str3"); lock2.Unlock("str1"); lock3.Unlock("str1"); - // 未锁unlock ok + // Unlock OK lock2.Unlock("str2"); } @@ -64,12 +63,13 @@ TEST(TestNameLock, TestNameLockGuardBasic) { NameLockGuard guard1(lock1, "str1"); NameLockGuard guard2(lock1, "str2"); NameLockGuard guard3(lock2, "str1"); - // 作用域内加锁成功,不可再加锁 + // Successfully locked within the scope, unable to lock again ASSERT_FALSE(lock1.TryLock("str1")); ASSERT_FALSE(lock1.TryLock("str2")); ASSERT_FALSE(lock2.TryLock("str1")); } - // 作用域外自动解锁,可再加锁 + // Automatically unlocking outside the scope, with the option to add locks + // again ASSERT_TRUE(lock1.TryLock("str1")); ASSERT_TRUE(lock1.TryLock("str2")); ASSERT_TRUE(lock2.TryLock("str1")); @@ -80,14 +80,14 @@ TEST(TestNameLock, TestNameLockGuardBasic) { TEST(TestNameLock, TestNameLockConcurrent) { NameLock lock1; - auto worker = [&] (const std::string 
&str) { + auto worker = [&](const std::string& str) { for (int i = 0; i < 10000; i++) { NameLockGuard guard(lock1, str); } }; std::vector threadpool; - for (auto &t : threadpool) { + for (auto& t : threadpool) { std::string str1 = "aaaa"; std::string str2 = "bbbb"; std::srand(std::time(nullptr)); @@ -95,12 +95,10 @@ TEST(TestNameLock, TestNameLockConcurrent) { t = std::thread(worker, rstr); } - for (auto &t : threadpool) { + for (auto& t : threadpool) { t.join(); } } - - -} // namespace common -} // namespace nebd +} // namespace common +} // namespace nebd diff --git a/proto/topology.proto b/proto/topology.proto index 6e88d4e102..f9864de5e9 100644 --- a/proto/topology.proto +++ b/proto/topology.proto @@ -415,7 +415,7 @@ message CreateLogicalPoolRequest { required LogicalPoolType type = 4; required bytes redundanceAndPlaceMentPolicy = 5; //json body required bytes userPolicy = 6; //json body - optional uint32 scatterWidth = 7; //生成copyset依据的scatterWidth平均值 + optional uint32 scatterWidth = 7; // Generate copyset based on the average scatterWidth value optional AllocateStatus status = 8; } diff --git a/src/chunkserver/cli.h b/src/chunkserver/cli.h index 3c8ecc6997..ed048dc460 100644 --- a/src/chunkserver/cli.h +++ b/src/chunkserver/cli.h @@ -33,41 +33,37 @@ namespace curve { namespace chunkserver { /** - * Cli就是配置变更相关接口的封装,方便使用,避免直接操作RPC + * Cli is the encapsulation of configuration change related interfaces, which is + * convenient to use and avoids direct RPC operations */ -// 获取leader -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - PeerId *leaderId); +// Get the leader +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + PeerId* leaderId); -// 增加一个peer -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Add a 
peer +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 移除一个peer -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer_id, - const braft::cli::CliOptions &options); +// Remove a peer +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const PeerId& peer_id, + const braft::cli::CliOptions& options); -// 转移leader -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Transfer leader +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const PeerId& peer, + const braft::cli::CliOptions& options); -// 触发快照 -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const PeerId &peer, - const braft::cli::CliOptions &options); +// Trigger snapshot +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const PeerId& peer, + const braft::cli::CliOptions& options); } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/cli2.cpp b/src/chunkserver/cli2.cpp index 5328724316..bf76d58934 100644 --- a/src/chunkserver/cli2.cpp +++ b/src/chunkserver/cli2.cpp @@ -22,10 +22,10 @@ #include "src/chunkserver/cli2.h" -#include -#include #include #include +#include +#include #include @@ -34,16 +34,14 @@ namespace curve { namespace chunkserver { -butil::Status GetLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - Peer *leader) { +butil::Status GetLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + Peer* leader) { if (conf.empty()) { return 
butil::Status(EINVAL, "Empty group configuration"); } - butil::Status st(-1, - "Fail to get leader of copyset node %s", + butil::Status st(-1, "Fail to get leader of copyset node %s", ToGroupIdString(logicPoolId, copysetId).c_str()); PeerId leaderId; Configuration::const_iterator iter = conf.begin(); @@ -53,7 +51,7 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status(-1, "Fail to init channel to %s", iter->to_string().c_str()); } - Peer *peer = new Peer(); + Peer* peer = new Peer(); CliService2_Stub stub(&channel); GetLeaderRequest2 request; GetLeaderResponse2 response; @@ -84,11 +82,9 @@ butil::Status GetLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status AddPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status AddPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -101,10 +97,10 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, AddPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *addPeer = new Peer(); + Peer* addPeer = new Peer(); request.set_allocated_addpeer(addPeer); *addPeer = peer; AddPeerResponse2 response; @@ -128,17 +124,15 @@ butil::Status AddPeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return 
butil::Status::OK(); } -butil::Status RemovePeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status RemovePeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -151,10 +145,10 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, RemovePeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *removePeer = new Peer(); + Peer* removePeer = new Peer(); request.set_allocated_removepeer(removePeer); *removePeer = peer; RemovePeerResponse2 response; @@ -179,17 +173,15 @@ butil::Status RemovePeer(const LogicPoolID &logicPoolId, new_conf.add_peer(peer); } LOG(INFO) << "Configuration of replication group ` " - << ToGroupIdString(logicPoolId, copysetId) - << " ' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << " ' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status ChangePeers(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Configuration &newPeers, - const braft::cli::CliOptions &options) { +butil::Status ChangePeers(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Configuration& conf, + const Configuration& newPeers, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -203,11 +195,11 @@ butil::Status ChangePeers(const LogicPoolID &logicPoolId, ChangePeersRequest2 request; 
request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); *leaderPeer = leader; request.set_allocated_leader(leaderPeer); - for (Configuration::const_iterator - iter = newPeers.begin(); iter != newPeers.end(); ++iter) { + for (Configuration::const_iterator iter = newPeers.begin(); + iter != newPeers.end(); ++iter) { request.add_newpeers()->set_address(iter->to_string()); } ChangePeersResponse2 response; @@ -229,17 +221,15 @@ butil::Status ChangePeers(const LogicPoolID &logicPoolId, new_conf.add_peer(response.newpeers(i).address()); } LOG(INFO) << "Configuration of replication group `" - << ToGroupIdString(logicPoolId, copysetId) - << "' changed from " << old_conf - << " to " << new_conf; + << ToGroupIdString(logicPoolId, copysetId) << "' changed from " + << old_conf << " to " << new_conf; return butil::Status::OK(); } -butil::Status TransferLeader(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &conf, - const Peer &peer, - const braft::cli::CliOptions &options) { +butil::Status TransferLeader(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& conf, const Peer& peer, + const braft::cli::CliOptions& options) { Peer leader; butil::Status st = GetLeader(logicPoolId, copysetId, conf, &leader); BRAFT_RETURN_IF(!st.ok(), st); @@ -256,10 +246,10 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId, TransferLeaderRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *leaderPeer = new Peer(); + Peer* leaderPeer = new Peer(); request.set_allocated_leader(leaderPeer); *leaderPeer = leader; - Peer *transfereePeer = new Peer(); + Peer* transfereePeer = new Peer(); request.set_allocated_transferee(transfereePeer); *transfereePeer = peer; TransferLeaderResponse2 response; @@ -274,18 +264,24 @@ butil::Status TransferLeader(const LogicPoolID &logicPoolId, return butil::Status::OK(); } 
-// reset peer不走一致性协议,直接将peers重置,因此存在一定的风险 -// 应用场景:大多数节点挂掉的极端情况。在这种情况下,该copyset将无法写入,直 -// 到半小时后mds将挂掉的副本上的copyset迁移,因此有一段时间不可用,为了应对这种场景,引入了 -// reset peer工具,直接将复制组成员reset成只包含存活的副本。 -// 注意事项: -// 1、reset peer之前,需要通过check-copyset工具确认复制组中的大多数副本确实挂掉 -// 2、reset peer的时候,要确保剩下的副本有最新的数据,不然存在丢数据的风险 -// 3、reset peer适用于其他两个副本不能恢复的情况,不然可能会扰乱集群 -butil::Status ResetPeer(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration& newPeers, - const Peer& requestPeer, +// reset peer does not follow a consistency protocol and directly resets them, +// thus posing certain risks Application scenario: Extreme situation where most +// nodes fail. In this case, the copyset will not be able to be written directly +// After half an hour, MDS will migrate the copyset on the suspended replica, +// which will be unavailable for a period of time. To cope with this scenario, +// we have introduced The reset peer tool directly resets replication group +// members to only contain surviving replicas. +// Precautions: +// 1. Before resetting the peer, it is necessary to confirm through the +// check-copyset tool that most of the replicas in the replication group have +// indeed been suspended. +// 2. When resetting the peer, ensure that the remaining replicas have the +// latest data, otherwise there is a risk of data loss. +// 3. Reset peer is suitable for situations where the other two replicas cannot +// be restored, otherwise it may disrupt the cluster. 
+butil::Status ResetPeer(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& newPeers, const Peer& requestPeer, const braft::cli::CliOptions& options) { if (newPeers.empty()) { return butil::Status(EINVAL, "new_conf is empty"); @@ -294,7 +290,7 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, brpc::Channel channel; if (channel.Init(requestPeerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - requestPeerId.to_string().c_str()); + requestPeerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms); @@ -302,11 +298,11 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, ResetPeerRequest2 request; request.set_logicpoolid(logicPoolId); request.set_copysetid(copysetId); - Peer *requestPeerPtr = new Peer(); + Peer* requestPeerPtr = new Peer(); *requestPeerPtr = requestPeer; request.set_allocated_requestpeer(requestPeerPtr); - for (Configuration::const_iterator - iter = newPeers.begin(); iter != newPeers.end(); ++iter) { + for (Configuration::const_iterator iter = newPeers.begin(); + iter != newPeers.end(); ++iter) { request.add_newpeers()->set_address(iter->to_string()); } ResetPeerResponse2 response; @@ -318,15 +314,14 @@ butil::Status ResetPeer(const LogicPoolID &logicPoolId, return butil::Status::OK(); } -butil::Status Snapshot(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Peer& peer, +butil::Status Snapshot(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, const Peer& peer, const braft::cli::CliOptions& options) { brpc::Channel channel; PeerId peerId(peer.address()); if (channel.Init(peerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - peerId.to_string().c_str()); + peerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms); @@ -334,7 +329,7 @@ butil::Status Snapshot(const LogicPoolID &logicPoolId, SnapshotRequest2 request; request.set_logicpoolid(logicPoolId); 
request.set_copysetid(copysetId); - Peer *peerPtr = new Peer(peer); + Peer* peerPtr = new Peer(peer); request.set_allocated_peer(peerPtr); SnapshotResponse2 response; CliService2_Stub stub(&channel); @@ -351,7 +346,7 @@ butil::Status SnapshotAll(const Peer& peer, PeerId peerId(peer.address()); if (channel.Init(peerId.addr, NULL) != 0) { return butil::Status(-1, "Fail to init channel to %s", - peerId.to_string().c_str()); + peerId.to_string().c_str()); } brpc::Controller cntl; cntl.set_timeout_ms(options.timeout_ms); diff --git a/src/chunkserver/clone_task.h b/src/chunkserver/clone_task.h index 48766bce9a..cd55f0b439 100644 --- a/src/chunkserver/clone_task.h +++ b/src/chunkserver/clone_task.h @@ -25,37 +25,33 @@ #include #include + #include #include #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uncopyable.h" #include "src/chunkserver/clone_copyer.h" #include "src/chunkserver/clone_core.h" +#include "src/common/uncopyable.h" namespace curve { namespace chunkserver { using curve::common::Uncopyable; -class CloneTask : public Uncopyable - , public std::enable_shared_from_this{ +class CloneTask : public Uncopyable, + public std::enable_shared_from_this { public: CloneTask(std::shared_ptr request, std::shared_ptr core, ::google::protobuf::Closure* done) - : core_(core) - , readRequest_(request) - , done_(done) - , isComplete_(false) {} + : core_(core), readRequest_(request), done_(done), isComplete_(false) {} virtual ~CloneTask() {} virtual std::function Closure() { auto sharedThis = shared_from_this(); - return [sharedThis] () { - sharedThis->Run(); - }; + return [sharedThis]() { sharedThis->Run(); }; } virtual void Run() { @@ -65,18 +61,16 @@ class CloneTask : public Uncopyable isComplete_ = true; } - virtual bool IsComplete() { - return isComplete_; - } + virtual bool IsComplete() { return isComplete_; } protected: - // 克隆核心逻辑 + // Clone Core Logic std::shared_ptr core_; - // 此次任务相关信息 + // Information related to this task std::shared_ptr 
readRequest_; - // 任务结束后要执行的Closure + // Closure to be executed after the task is completed ::google::protobuf::Closure* done_; - // 任务是否结束 + // Is the task completed bool isComplete_; }; diff --git a/src/chunkserver/copyset_node.cpp b/src/chunkserver/copyset_node.cpp index a00f7aaf9a..8267c268d8 100755 --- a/src/chunkserver/copyset_node.cpp +++ b/src/chunkserver/copyset_node.cpp @@ -22,33 +22,34 @@ #include "src/chunkserver/copyset_node.h" -#include -#include -#include #include -#include #include -#include -#include +#include +#include +#include +#include + #include #include -#include -#include -#include #include #include +#include +#include +#include +#include +#include -#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" #include "src/chunkserver/chunk_closure.h" -#include "src/chunkserver/op_request.h" -#include "src/common/concurrent/task_thread_pool.h" -#include "src/fs/fs_common.h" #include "src/chunkserver/copyset_node_manager.h" -#include "src/chunkserver/datastore/define.h" #include "src/chunkserver/datastore/datastore_file_helper.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/datastore/define.h" +#include "src/chunkserver/op_request.h" +#include "src/chunkserver/raftsnapshot/curve_filesystem_adaptor.h" +#include "src/common/concurrent/task_thread_pool.h" #include "src/common/crc32.h" #include "src/common/fs_util.h" +#include "src/common/uri_parser.h" +#include "src/fs/fs_common.h" namespace braft { DECLARE_bool(raft_enable_leader_lease); @@ -59,37 +60,36 @@ namespace chunkserver { using curve::fs::FileSystemInfo; -const char *kCurveConfEpochFilename = "conf.epoch"; +const char* kCurveConfEpochFilename = "conf.epoch"; uint32_t CopysetNode::syncTriggerSeconds_ = 25; -std::shared_ptr> - CopysetNode::copysetSyncPool_ = nullptr; - -CopysetNode::CopysetNode(const LogicPoolID &logicPoolId, - const CopysetID ©setId, - const Configuration &initConf) : - logicPoolId_(logicPoolId), - copysetId_(copysetId), - conf_(initConf), - 
epoch_(0), - peerId_(), - nodeOptions_(), - raftNode_(nullptr), - chunkDataApath_(), - chunkDataRpath_(), - appliedIndex_(0), - leaderTerm_(-1), - configChange_(std::make_shared()), - lastSnapshotIndex_(0), - scaning_(false), - lastScanSec_(0), - enableOdsyncWhenOpenChunkFile_(false), - isSyncing_(false), - checkSyncingIntervalMs_(500) { -} +std::shared_ptr> CopysetNode::copysetSyncPool_ = + nullptr; + +CopysetNode::CopysetNode(const LogicPoolID& logicPoolId, + const CopysetID& copysetId, + const Configuration& initConf) + : logicPoolId_(logicPoolId), + copysetId_(copysetId), + conf_(initConf), + epoch_(0), + peerId_(), + nodeOptions_(), + raftNode_(nullptr), + chunkDataApath_(), + chunkDataRpath_(), + appliedIndex_(0), + leaderTerm_(-1), + configChange_(std::make_shared()), + lastSnapshotIndex_(0), + scaning_(false), + lastScanSec_(0), + enableOdsyncWhenOpenChunkFile_(false), + isSyncing_(false), + checkSyncingIntervalMs_(500) {} CopysetNode::~CopysetNode() { - // 移除 copyset的metric + // Remove metric from copyset ChunkServerMetric::GetInstance()->RemoveCopysetMetric(logicPoolId_, copysetId_); metric_ = nullptr; @@ -98,17 +98,16 @@ CopysetNode::~CopysetNode() { delete nodeOptions_.snapshot_file_system_adaptor; nodeOptions_.snapshot_file_system_adaptor = nullptr; } - LOG(INFO) << "release copyset node: " - << GroupIdString(); + LOG(INFO) << "release copyset node: " << GroupIdString(); } -int CopysetNode::Init(const CopysetNodeOptions &options) { +int CopysetNode::Init(const CopysetNodeOptions& options) { std::string groupId = GroupId(); std::string protocol = curve::common::UriParser::ParseUri( options.chunkDataUri, ©setDirPath_); if (protocol.empty()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add necessary error codes and return LOG(ERROR) << "not support chunk data uri's protocol" << " error chunkDataDir is: " << options.chunkDataUri << ". 
Copyset: " << GroupIdString(); @@ -135,12 +134,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { dsOptions.locationLimit = options.locationLimit; dsOptions.enableOdsyncWhenOpenChunkFile = options.enableOdsyncWhenOpenChunkFile; - dataStore_ = std::make_shared(options.localFileSystem, - options.chunkFilePool, - dsOptions); + dataStore_ = std::make_shared( + options.localFileSystem, options.chunkFilePool, dsOptions); CHECK(nullptr != dataStore_); if (false == dataStore_->Initialize()) { - // TODO(wudemiao): 增加必要的错误码并返回 + // TODO(wudemiao): Add necessary error codes and return LOG(ERROR) << "data store init failed. " << "Copyset: " << GroupIdString(); return -1; @@ -150,10 +148,10 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { syncThread_.Init(this); dataStore_->SetCacheCondPtr(syncThread_.cond_); dataStore_->SetCacheLimits(options.syncChunkLimit, - options.syncThreshold); + options.syncThreshold); LOG(INFO) << "init sync thread success limit = " - << options.syncChunkLimit << - "syncthreshold = " << options.syncThreshold; + << options.syncChunkLimit + << "syncthreshold = " << options.syncThreshold; } recyclerUri_ = options.recyclerUri; @@ -166,21 +164,21 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { // initialize raft node options corresponding to the copy set node InitRaftNodeOptions(options); - /* 初始化 peer id */ + /* Initialize peer id */ butil::ip_t ip; butil::str2ip(options.ip.c_str(), &ip); butil::EndPoint addr(ip, options.port); /** - * idx默认是零,在chunkserver不允许一个进程有同一个copyset的多副本, - * 这一点注意和不让braft区别开来 + * The default idx is zero, and chunkserver does not allow a process to have + * multiple copies of the same copyset, Pay attention to this point and not + * distinguish between braces */ peerId_ = PeerId(addr, 0); raftNode_ = std::make_shared(groupId, peerId_); concurrentapply_ = options.concurrentapply; - /* - * 初始化copyset性能metrics + * Initialize copyset performance metrics */ int ret = 
ChunkServerMetric::GetInstance()->CreateCopysetMetric( logicPoolId_, copysetId_); @@ -189,10 +187,11 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { << "Copyset: " << GroupIdString(); return -1; } - metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric( - logicPoolId_, copysetId_); + metric_ = ChunkServerMetric::GetInstance()->GetCopysetMetric(logicPoolId_, + copysetId_); if (metric_ != nullptr) { - // TODO(yyk) 后续考虑添加datastore层面的io metric + // TODO(yyk) will consider adding io metrics at the datastore level in + // the future metric_->MonitorDataStore(dataStore_.get()); } @@ -213,7 +212,7 @@ int CopysetNode::Init(const CopysetNodeOptions &options) { } int CopysetNode::Run() { - // raft node的初始化实际上让起run起来 + // The initialization of the raft node actually starts running if (0 != raftNode_->init(nodeOptions_)) { LOG(ERROR) << "Fail to init raft node. " << "Copyset: " << GroupIdString(); @@ -237,19 +236,20 @@ void CopysetNode::Fini() { WaitSnapshotDone(); if (nullptr != raftNode_) { - // 关闭所有关于此raft node的服务 + // Close all services related to this raft node raftNode_->shutdown(nullptr); - // 等待所有的正在处理的task结束 + // Waiting for all tasks being processed to end raftNode_->join(); } if (nullptr != concurrentapply_) { - // 将未刷盘的数据落盘,如果不刷盘 - // 迁移copyset时,copyset移除后再去执行WriteChunk操作可能出错 + // Drop the data that has not been flushed onto the disk, if not flushed + // When migrating a copyset, removing the copyset before executing the + // WriteChunk operation may result in errors concurrentapply_->Flush(); } } -void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { +void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions& options) { auto groupId = GroupId(); nodeOptions_.initial_conf = conf_; nodeOptions_.election_timeout_ms = options.electionTimeoutMs; @@ -257,20 +257,19 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { nodeOptions_.node_owns_fsm = false; nodeOptions_.snapshot_interval_s = 
options.snapshotIntervalS; nodeOptions_.log_uri = options.logUri; - nodeOptions_.log_uri.append("/").append(groupId) - .append("/").append(RAFT_LOG_DIR); + nodeOptions_.log_uri.append("/").append(groupId).append("/").append( + RAFT_LOG_DIR); nodeOptions_.raft_meta_uri = options.raftMetaUri; - nodeOptions_.raft_meta_uri.append("/").append(groupId) - .append("/").append(RAFT_META_DIR); + nodeOptions_.raft_meta_uri.append("/").append(groupId).append("/").append( + RAFT_META_DIR); nodeOptions_.snapshot_uri = options.raftSnapshotUri; - nodeOptions_.snapshot_uri.append("/").append(groupId) - .append("/").append(RAFT_SNAP_DIR); + nodeOptions_.snapshot_uri.append("/").append(groupId).append("/").append( + RAFT_SNAP_DIR); nodeOptions_.usercode_in_pthread = options.usercodeInPthread; nodeOptions_.snapshot_throttle = options.snapshotThrottle; - CurveFilesystemAdaptor* cfa = - new CurveFilesystemAdaptor(options.chunkFilePool, - options.localFileSystem); + CurveFilesystemAdaptor* cfa = new CurveFilesystemAdaptor( + options.chunkFilePool, options.localFileSystem); std::vector filterList; std::string snapshotMeta(BRAFT_SNAPSHOT_META_FILE); filterList.push_back(kCurveConfEpochFilename); @@ -282,37 +281,42 @@ void CopysetNode::InitRaftNodeOptions(const CopysetNodeOptions &options) { new scoped_refptr(cfa); } -void CopysetNode::on_apply(::braft::Iterator &iter) { +void CopysetNode::on_apply(::braft::Iterator& iter) { for (; iter.valid(); iter.next()) { - // 放在bthread中异步执行,避免阻塞当前状态机的执行 + // Asynchronous execution in bthread to avoid blocking the execution of + // the current state machine braft::AsyncClosureGuard doneGuard(iter.done()); /** - * 获取向braft提交任务时候传递的ChunkClosure,里面包含了 - * Op的所有上下文 ChunkOpRequest + * Obtain the ChunkClosure passed when submitting tasks to Braft, which + * includes All Contextual ChunkOpRequest for Op */ - braft::Closure *closure = iter.done(); + braft::Closure* closure = iter.done(); if (nullptr != closure) { /** - * 1.closure不是null,那么说明当前节点正常,直接从内存中拿到Op - 
* context进行apply + * 1. If the closure is not null, it indicates that the current node + * is normal and Op is directly obtained from memory Apply in + * context */ - ChunkClosure - *chunkClosure = dynamic_cast(iter.done()); + ChunkClosure* chunkClosure = + dynamic_cast(iter.done()); CHECK(nullptr != chunkClosure) << "ChunkClosure dynamic cast failed"; std::shared_ptr& opRequest = chunkClosure->request_; - concurrentapply_->Push(opRequest->ChunkId(), ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT - &ChunkOpRequest::OnApply, opRequest, - iter.index(), doneGuard.release()); + concurrentapply_->Push( + opRequest->ChunkId(), + ChunkOpRequest::Schedule(opRequest->OpType()), // NOLINT + &ChunkOpRequest::OnApply, opRequest, iter.index(), + doneGuard.release()); } else { - // 获取log entry + // Obtain log entry butil::IOBuf log = iter.data(); /** - * 2.closure是null,有两种情况: - * 2.1. 节点重启,回放apply,这里会将Op log entry进行反序列化, - * 然后获取Op信息进行apply + * 2. If the closure is null, there are two situations: + * 2.1. Restart the node and replay the application. Here, the Op + * log entry will be deserialized, Then obtain Op information + * for application * 2.2. 
follower apply */ ChunkRequest request; @@ -320,9 +324,10 @@ void CopysetNode::on_apply(::braft::Iterator &iter) { auto opReq = ChunkOpRequest::Decode(log, &request, &data, iter.index(), GetLeaderId()); auto chunkId = request.chunkid(); - concurrentapply_->Push(chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT - &ChunkOpRequest::OnApplyFromLog, opReq, - dataStore_, std::move(request), data); + concurrentapply_->Push( + chunkId, ChunkOpRequest::Schedule(request.optype()), // NOLINT + &ChunkOpRequest::OnApplyFromLog, opReq, dataStore_, + std::move(request), data); } } } @@ -331,11 +336,11 @@ void CopysetNode::on_shutdown() { LOG(INFO) << GroupIdString() << " is shutdown"; } -void CopysetNode::on_snapshot_save(::braft::SnapshotWriter *writer, - ::braft::Closure *done) { +void CopysetNode::on_snapshot_save(::braft::SnapshotWriter* writer, + ::braft::Closure* done) { snapshotFuture_ = - std::async(std::launch::async, - &CopysetNode::save_snapshot_background, this, writer, done); + std::async(std::launch::async, &CopysetNode::save_snapshot_background, + this, writer, done); } void CopysetNode::WaitSnapshotDone() { @@ -345,12 +350,12 @@ void CopysetNode::WaitSnapshotDone() { } } -void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, - ::braft::Closure *done) { +void CopysetNode::save_snapshot_background(::braft::SnapshotWriter* writer, + ::braft::Closure* done) { brpc::ClosureGuard doneGuard(done); /** - * 1.flush I/O to disk,确保数据都落盘 + * 1. flush I/O to disk to ensure that all data is dropped */ concurrentapply_->Flush(); @@ -359,37 +364,41 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 2.保存配置版本: conf.epoch,注意conf.epoch是存放在data目录下 + * 2. 
Save the configuration version: conf.epoch, please note that + * conf.epoch is stored in the data directory */ - std::string - filePathTemp = writer->get_path() + "/" + kCurveConfEpochFilename; + std::string filePathTemp = + writer->get_path() + "/" + kCurveConfEpochFilename; if (0 != SaveConfEpoch(filePathTemp)) { done->status().set_error(errno, "invalid: %s", strerror(errno)); LOG(ERROR) << "SaveConfEpoch failed. " - << "Copyset: " << GroupIdString() - << ", errno: " << errno << ", " + << "Copyset: " << GroupIdString() << ", errno: " << errno + << ", " << ", error message: " << strerror(errno); return; } /** - * 3.保存chunk文件名的列表到快照元数据文件中 + * 3. Save the list of chunk file names to the snapshot metadata file */ std::vector files; if (0 == fs_->List(chunkDataApath_, &files)) { for (const auto& fileName : files) { - // raft保存快照时,meta信息中不用保存快照文件列表 - // raft下载快照的时候,在下载完chunk以后,会单独获取snapshot列表 + // When saving a snapshot in the raft, there is no need to save the + // list of snapshot files in the meta information. + // When raft downloads a snapshot, after downloading the chunk, + // a separate snapshot list will be obtained. bool isSnapshot = DatastoreFileHelper::IsSnapshotFile(fileName); if (isSnapshot) { continue; } std::string chunkApath; - // 通过绝对路径,算出相对于快照目录的路径 + // Calculate the path relative to the snapshot directory through + // absolute path chunkApath.append(chunkDataApath_); chunkApath.append("/").append(fileName); - std::string filePath = curve::common::CalcRelativePath( - writer->get_path(), chunkApath); + std::string filePath = + curve::common::CalcRelativePath(writer->get_path(), chunkApath); writer->add_file(filePath); } } else { @@ -401,16 +410,16 @@ void CopysetNode::save_snapshot_background(::braft::SnapshotWriter *writer, } /** - * 4. 保存conf.epoch文件到快照元数据文件中 + * 4. 
Save the conf.epoch file to the snapshot metadata file */ - writer->add_file(kCurveConfEpochFilename); + writer->add_file(kCurveConfEpochFilename); } -int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { +int CopysetNode::on_snapshot_load(::braft::SnapshotReader* reader) { /** - * 1. 加载快照数据 + * 1. Loading snapshot data */ - // 打开的 snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 + // Open snapshot path: /mnt/sda/1-10001/raft_snapshot/snapshot_0043 std::string snapshotPath = reader->get_path(); // /mnt/sda/1-10001/raft_snapshot/snapshot_0043/data @@ -419,15 +428,21 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { snapshotChunkDataDir.append("/").append(chunkDataRpath_); LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir << ", Copyset: " << GroupIdString(); - // 如果数据目录不存在,那么说明 load snapshot 数据部分就不需要处理 + // If the data directory does not exist, then the load snapshot data section + // does not need to be processed if (fs_->DirExists(snapshotChunkDataDir)) { - // 加载快照数据前,要先清理copyset data目录下的文件 - // 否则可能导致快照加载以后存在一些残留的数据 - // 如果delete_file失败或者rename失败,当前node状态会置为ERROR - // 如果delete_file或者rename期间进程重启,copyset起来后会加载快照 - // 由于rename可以保证原子性,所以起来加载快照后,data目录一定能还原 - bool ret = nodeOptions_.snapshot_file_system_adaptor->get()-> - delete_file(chunkDataApath_, true); + // Before loading snapshot data, clean the files in the copyset data + // directory first Otherwise, it may result in some residual data after + // the snapshot is loaded. + // If delete_file or rename fails, the current node status will be set + // to ERROR. + // If delete_file or during the renamethe process restarts, and + // after copyset is set, the snapshot will be loaded Since rename + // ensures atomicity, after loading the snapshot, the data directory + // must be restored. + bool ret = + nodeOptions_.snapshot_file_system_adaptor->get()->delete_file( + chunkDataApath_, true); if (!ret) { LOG(ERROR) << "delete chunk data dir failed. 
" << "Copyset: " << GroupIdString() @@ -437,8 +452,8 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { LOG(INFO) << "delete chunk data dir success. " << "Copyset: " << GroupIdString() << ", path: " << chunkDataApath_; - ret = nodeOptions_.snapshot_file_system_adaptor->get()-> - rename(snapshotChunkDataDir, chunkDataApath_); + ret = nodeOptions_.snapshot_file_system_adaptor->get()->rename( + snapshotChunkDataDir, chunkDataApath_); if (!ret) { LOG(ERROR) << "rename snapshot data dir " << snapshotChunkDataDir << "to chunk data dir " << chunkDataApath_ << " failed. " @@ -449,13 +464,13 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { << "to chunk data dir " << chunkDataApath_ << " success. " << "Copyset: " << GroupIdString(); } else { - LOG(INFO) << "load snapshot data path: " - << snapshotChunkDataDir << " not exist. " + LOG(INFO) << "load snapshot data path: " << snapshotChunkDataDir + << " not exist. " << "Copyset: " << GroupIdString(); } /** - * 2. 加载配置版本文件 + * 2. Load Configuration Version File */ std::string filePath = reader->get_path() + "/" + kCurveConfEpochFilename; if (fs_->FileExists(filePath)) { @@ -468,20 +483,25 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 3.重新init data store,场景举例: + * 3. Reinitializing the data store, with examples: * - * (1) 例如一个add peer,之后立马read这个时候data store会返回chunk - * not exist,因为这个新增的peer在刚开始起来的时候,没有任何数据,这 - * 个时候data store init了,那么新增的peer在leader恢复了数据之后, - * data store并不感知; + * (1) For instance, when adding a new peer and immediately reading data, + * the data store may return "chunk not exist." This is because the newly + * added peer initially has no data, and when the data store is initialized, + * it is not aware of the data that the new peer receives after the leader + * recovers its data. 
* - * (2) peer通过install snapshot恢复了所有的数据是通过rename操作的, - * 如果某个file之前被data store打开了,那么rename能成功,但是老的 - * 文件只有等data store close老的文件才能删除,所以需要重新init data - * store,并且close的文件的fd,然后重新open新的文件,不然data store - * 会一直是操作的老的文件,而一旦data store close相应的fd一次之后, - * 后面的write的数据就会丢,除此之外,如果 datastore init没有重新open - * 文件,也将导致read不到恢复过来的数据,而是read到老的数据。 + * (2) When a peer recovers all of its data through an install snapshot + * operation, it is performed through a rename operation. If a file was + * previously open in the data store, the rename operation can succeed, but + * the old file can only be deleted after the data store closes it. + * Therefore, it is necessary to reinitialize the data store, close the + * file's file descriptor (fd), and then reopen the new file. Otherwise, the + * data store will continue to operate on the old file. Once the data store + * closes, the corresponding fd, any subsequent write operations will be + * lost. Additionally, if the datastore is not reinitialized and the new + * file is not reopened, it may result in reading the old data rather than + * the recovered data. */ if (!dataStore_->Initialize()) { LOG(ERROR) << "data store init failed in on snapshot load. " @@ -490,8 +510,9 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { } /** - * 4.如果snapshot中存 conf,那么加载初始化,保证不需要以来 - * on_configuration_committed。需要注意的是这里会忽略joint stage的日志。 + * 4. If conf is stored in the snapshot, load initialization to ensure that + * there is no need for on_configuration_committed. It should be noted + * that the log of the joint stage will be ignored here. */ braft::SnapshotMeta meta; reader->load_meta(&meta); @@ -510,7 +531,7 @@ int CopysetNode::on_snapshot_load(::braft::SnapshotReader *reader) { void CopysetNode::on_leader_start(int64_t term) { /* - * Invoke order in on_leader_start: + * Invoke order in on_leader_start: * 1. flush concurrent apply queue. * 2. set term in states machine. 
* @@ -536,7 +557,7 @@ void CopysetNode::on_leader_start(int64_t term) { << " become leader, term is: " << leaderTerm_; } -void CopysetNode::on_leader_stop(const butil::Status &status) { +void CopysetNode::on_leader_stop(const butil::Status& status) { (void)status; leaderTerm_.store(-1, std::memory_order_release); ChunkServerMetric::GetInstance()->DecreaseLeaderCount(); @@ -544,7 +565,7 @@ void CopysetNode::on_leader_stop(const butil::Status &status) { << ", peer id: " << peerId_.to_string() << " stepped down"; } -void CopysetNode::on_error(const ::braft::Error &e) { +void CopysetNode::on_error(const ::braft::Error& e) { LOG(FATAL) << "Copyset: " << GroupIdString() << ", peer id: " << peerId_.to_string() << " meet raft error: " << e; @@ -556,7 +577,7 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, // Loading snapshot should not increase epoch. When loading // snapshot, the index is equal with lastSnapshotIndex_. LOG(INFO) << "index: " << index - << ", lastSnapshotIndex_: " << lastSnapshotIndex_; + << ", lastSnapshotIndex_: " << lastSnapshotIndex_; if (index != lastSnapshotIndex_) { std::unique_lock lock_guard(confLock_); conf_ = conf; @@ -569,63 +590,47 @@ void CopysetNode::on_configuration_committed(const Configuration& conf, << ", epoch: " << epoch_.load(std::memory_order_acquire); } -void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_stop_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << " stops following" << ctx; + << ", peer id: " << peerId_.to_string() << " stops following" + << ctx; } -void CopysetNode::on_start_following(const ::braft::LeaderChangeContext &ctx) { +void CopysetNode::on_start_following(const ::braft::LeaderChangeContext& ctx) { LOG(INFO) << "Copyset: " << GroupIdString() - << ", peer id: " << peerId_.to_string() - << "start following" << ctx; + << ", peer id: " << 
peerId_.to_string() << "start following" + << ctx; } -LogicPoolID CopysetNode::GetLogicPoolId() const { - return logicPoolId_; -} +LogicPoolID CopysetNode::GetLogicPoolId() const { return logicPoolId_; } -CopysetID CopysetNode::GetCopysetId() const { - return copysetId_; -} +CopysetID CopysetNode::GetCopysetId() const { return copysetId_; } -void CopysetNode::SetScan(bool scan) { - scaning_ = scan; -} +void CopysetNode::SetScan(bool scan) { scaning_ = scan; } -bool CopysetNode::GetScan() const { - return scaning_; -} +bool CopysetNode::GetScan() const { return scaning_; } -void CopysetNode::SetLastScan(uint64_t time) { - lastScanSec_ = time; -} +void CopysetNode::SetLastScan(uint64_t time) { lastScanSec_ = time; } -uint64_t CopysetNode::GetLastScan() const { - return lastScanSec_; -} +uint64_t CopysetNode::GetLastScan() const { return lastScanSec_; } std::vector& CopysetNode::GetFailedScanMap() { return failedScanMaps_; } -std::string CopysetNode::GetCopysetDir() const { - return copysetDirPath_; -} +std::string CopysetNode::GetCopysetDir() const { return copysetDirPath_; } uint64_t CopysetNode::GetConfEpoch() const { std::lock_guard lockguard(confLock_); return epoch_.load(std::memory_order_relaxed); } -int CopysetNode::LoadConfEpoch(const std::string &filePath) { +int CopysetNode::LoadConfEpoch(const std::string& filePath) { LogicPoolID loadLogicPoolID = 0; CopysetID loadCopysetID = 0; uint64_t loadEpoch = 0; - int ret = epochFile_->Load(filePath, - &loadLogicPoolID, - &loadCopysetID, + int ret = epochFile_->Load(filePath, &loadLogicPoolID, &loadCopysetID, &loadEpoch); if (0 == ret) { if (logicPoolId_ != loadLogicPoolID || copysetId_ != loadCopysetID) { @@ -643,7 +648,7 @@ int CopysetNode::LoadConfEpoch(const std::string &filePath) { return ret; } -int CopysetNode::SaveConfEpoch(const std::string &filePath) { +int CopysetNode::SaveConfEpoch(const std::string& filePath) { return epochFile_->Save(filePath, logicPoolId_, copysetId_, epoch_); } @@ -678,17 +683,17 @@ 
void CopysetNode::SetCopysetNode(std::shared_ptr node) { raftNode_ = node; } -void CopysetNode::SetSnapshotFileSystem(scoped_refptr *fs) { +void CopysetNode::SetSnapshotFileSystem(scoped_refptr* fs) { nodeOptions_.snapshot_file_system_adaptor = fs; } bool CopysetNode::IsLeaderTerm() const { - if (0 < leaderTerm_.load(std::memory_order_acquire)) - return true; + if (0 < leaderTerm_.load(std::memory_order_acquire)) return true; return false; } -bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseLeader( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT /* * Why not use lease_status.state==LEASE_VALID directly to judge? * @@ -707,13 +712,12 @@ bool CopysetNode::IsLeaseLeader(const braft::LeaderLeaseStatus &lease_status) co return term > 0 && term == lease_status.term; } -bool CopysetNode::IsLeaseExpired(const braft::LeaderLeaseStatus &lease_status) const { // NOLINT +bool CopysetNode::IsLeaseExpired( + const braft::LeaderLeaseStatus& lease_status) const { // NOLINT return lease_status.state == braft::LEASE_EXPIRED; } -PeerId CopysetNode::GetLeaderId() const { - return raftNode_->leader_id(); -} +PeerId CopysetNode::GetLeaderId() const { return raftNode_->leader_id(); } butil::Status CopysetNode::TransferLeader(const Peer& peer) { butil::Status status; @@ -722,15 +726,15 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { if (raftNode_->leader_id() == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << "Skipped transferring leader to leader itself. 
" - << "peerid: " << peerId - << ", Copyset: " << GroupIdString(); + << "peerid: " << peerId << ", Copyset: " << GroupIdString(); return status; } int rc = raftNode_->transfer_leadership_to(peerId); if (rc != 0) { - status = butil::Status(rc, "Failed to transfer leader of copyset " + status = butil::Status(rc, + "Failed to transfer leader of copyset " "%s to peer %s, error: %s", GroupIdString().c_str(), peerId.to_string().c_str(), berror(rc)); @@ -741,9 +745,8 @@ butil::Status CopysetNode::TransferLeader(const Peer& peer) { transferee_ = peer; status = butil::Status::OK(); - LOG(INFO) << "Transferred leader of copyset " - << GroupIdString() - << " to peer " << peerId; + LOG(INFO) << "Transferred leader of copyset " << GroupIdString() + << " to peer " << peerId; return status; } @@ -761,14 +764,13 @@ butil::Status CopysetNode::AddPeer(const Peer& peer) { if (peer == peerId) { butil::Status status = butil::Status::OK(); DVLOG(6) << peerId << " is already a member of copyset " - << GroupIdString() - << ", skip adding peer"; + << GroupIdString() << ", skip adding peer"; return status; } } ConfigurationChangeDone* addPeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::ADD_PEER, peer); addPeerDone->expectedCfgChange = expectedCfgChange; raftNode_->add_peer(peerId, addPeerDone); @@ -797,13 +799,13 @@ butil::Status CopysetNode::RemovePeer(const Peer& peer) { if (!peerValid) { butil::Status status = butil::Status::OK(); - DVLOG(6) << peerId << " is not a member of copyset " - << GroupIdString() << ", skip removing"; + DVLOG(6) << peerId << " is not a member of copyset " << GroupIdString() + << ", skip removing"; return status; } ConfigurationChangeDone* removePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange(ConfigChangeType::REMOVE_PEER, peer); removePeerDone->expectedCfgChange 
= expectedCfgChange; raftNode_->remove_peer(peerId, removePeerDone); @@ -831,7 +833,7 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { return st; } ConfigurationChangeDone* changePeerDone = - new ConfigurationChangeDone(configChange_); + new ConfigurationChangeDone(configChange_); ConfigurationChange expectedCfgChange; expectedCfgChange.type = ConfigChangeType::CHANGE_PEER; expectedCfgChange.alterPeer.set_address(adding.begin()->to_string()); @@ -845,18 +847,22 @@ butil::Status CopysetNode::ChangePeer(const std::vector& newPeers) { void CopysetNode::UpdateAppliedIndex(uint64_t index) { uint64_t curIndex = appliedIndex_.load(std::memory_order_acquire); - // 只更新比自己大的 index + // Only update indexes larger than oneself if (index > curIndex) { /** - * compare_exchange_strong解释: - * 首先比较curIndex是不是等于appliedIndex,如果是,那么说明没有人 - * 修改appliedindex,那么用index去修改appliedIndex,更新成功,完成; - * 如果不等于,说明有人更新了appliedindex,那么通过curIndex返回当前 - * 的appliedindex,并且返回false。整个过程都是原子的 + * Explanation of compare_exchange_strong: + * First, it compares whether curIndex is equal to appliedIndex. If it + * is equal, it means that no one has modified appliedindex. In this + * case, it tries to update appliedIndex with the value of index, and if + * the update is successful, it's done. If curIndex is not equal to + * appliedindex, it indicates that someone else has updated appliedIndex + * in the meantime. In this case, it returns the current value of + * appliedindex through curIndex and returns false. This entire process + * is atomic. 
*/ - while (!appliedIndex_.compare_exchange_strong(curIndex, - index, - std::memory_order_acq_rel)) { //NOLINT + while (!appliedIndex_.compare_exchange_strong( + curIndex, index, + std::memory_order_acq_rel)) { // NOLINT if (index <= curIndex) { break; } @@ -876,27 +882,29 @@ CurveSegmentLogStorage* CopysetNode::GetLogStorage() const { return logStorage_; } -ConcurrentApplyModule *CopysetNode::GetConcurrentApplyModule() const { +ConcurrentApplyModule* CopysetNode::GetConcurrentApplyModule() const { return concurrentapply_; } -void CopysetNode::Propose(const braft::Task &task) { - raftNode_->apply(task); -} +void CopysetNode::Propose(const braft::Task& task) { raftNode_->apply(task); } -int CopysetNode::GetConfChange(ConfigChangeType *type, - Configuration *oldConf, - Peer *alterPeer) { +int CopysetNode::GetConfChange(ConfigChangeType* type, Configuration* oldConf, + Peer* alterPeer) { /** - * 避免new leader当选leader之后,提交noop entry之前,epoch和 - * 配置可能不一致的情况。考虑如下情形: + * To prevent inconsistencies between the epoch and configuration before + * a new leader is elected and a noop entry is committed, consider the + * following scenario: * - * 三个成员的复制组{ABC},当前epoch=5,A是leader,收到配置配置+D, - * 假设B收到了{ABC+D}的配置变更日志,然后leader A挂了,B当选为了 - * new leader,在B提交noop entry之前,B上查询到的epoch值最大可能为5, - * 而查询到的配置确实{ABCD}了,所以这里在new leader B在提交noop entry - * 之前,也就是实现隐公提交配置变更日志{ABC+D}之前,不允许向用户返回 - * 配置和配置变更信息,避免epoch和配置信息不一致 + * In a replication group with three members {ABC}, the current epoch is 5, + * and A is the leader. A receives a configuration change log that adds D, + * and assume that B also receives the configuration change log {ABC+D}. + * Then, leader A crashes, and B is elected as the new leader. Before B + * commits the noop entry, the maximum epoch value it can query on B is + * still 5, but the queried configuration is {ABCD}. 
Therefore, here, before + * the new leader B commits the noop entry, which is effectively committing + * the hidden configuration change log {ABC+D}, it does not allow returning + * the configuration and configuration change information to the user to + * avoid epoch and configuration information inconsistency. */ if (leaderTerm_.load(std::memory_order_acquire) <= 0) { *type = ConfigChangeType::NONE; @@ -922,9 +930,9 @@ uint64_t CopysetNode::LeaderTerm() const { return leaderTerm_.load(std::memory_order_acquire); } -int CopysetNode::GetHash(std::string *hash) { +int CopysetNode::GetHash(std::string* hash) { int ret = 0; - int fd = 0; + int fd = 0; int len = 0; uint32_t crc32c = 0; std::vector files; @@ -934,7 +942,8 @@ int CopysetNode::GetHash(std::string *hash) { return -1; } - // 计算所有chunk文件crc需要保证计算的顺序是一样的 + // Calculating all chunk files' crc requires ensuring that the order of + // calculations is the same std::sort(files.begin(), files.end()); for (std::string file : files) { @@ -953,7 +962,7 @@ int CopysetNode::GetHash(std::string *hash) { } len = fileInfo.st_size; - char *buff = new (std::nothrow) char[len]; + char* buff = new (std::nothrow) char[len]; if (nullptr == buff) { return -1; } @@ -974,15 +983,15 @@ int CopysetNode::GetHash(std::string *hash) { return 0; } -void CopysetNode::GetStatus(NodeStatus *status) { +void CopysetNode::GetStatus(NodeStatus* status) { raftNode_->get_status(status); } -void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus *status) { +void CopysetNode::GetLeaderLeaseStatus(braft::LeaderLeaseStatus* status) { raftNode_->get_leader_lease_status(status); } -bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { +bool CopysetNode::GetLeaderStatus(NodeStatus* leaderStaus) { NodeStatus status; GetStatus(&status); if (status.leader_id.is_empty()) { @@ -997,16 +1006,15 @@ bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { brpc::Controller cntl; cntl.set_timeout_ms(500); brpc::Channel channel; - if 
(channel.Init(status.leader_id.addr, nullptr) !=0) { - LOG(WARNING) << "can not create channel to " - << status.leader_id.addr + if (channel.Init(status.leader_id.addr, nullptr) != 0) { + LOG(WARNING) << "can not create channel to " << status.leader_id.addr << ", copyset " << GroupIdString(); return false; } CopysetStatusRequest request; CopysetStatusResponse response; - curve::common::Peer *peer = new curve::common::Peer(); + curve::common::Peer* peer = new curve::common::Peer(); peer->set_address(status.leader_id.to_string()); request.set_logicpoolid(logicPoolId_); request.set_copysetid(copysetId_); @@ -1016,16 +1024,15 @@ bool CopysetNode::GetLeaderStatus(NodeStatus *leaderStaus) { CopysetService_Stub stub(&channel); stub.GetCopysetStatus(&cntl, &request, &response, nullptr); if (cntl.Failed()) { - LOG(WARNING) << "get leader status failed: " - << cntl.ErrorText() + LOG(WARNING) << "get leader status failed: " << cntl.ErrorText() << ", copyset " << GroupIdString(); return false; } if (response.status() != COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS) { LOG(WARNING) << "get leader status failed" - << ", status: " << response.status() - << ", copyset " << GroupIdString(); + << ", status: " << response.status() << ", copyset " + << GroupIdString(); return false; } @@ -1078,9 +1085,8 @@ void CopysetNode::SyncAllChunks() { CSErrorCode r = dataStore_->SyncChunk(chunk); if (r != CSErrorCode::Success) { LOG(FATAL) << "Sync Chunk failed in Copyset: " - << GroupIdString() - << ", chunkid: " << chunk - << " data store return: " << r; + << GroupIdString() << ", chunkid: " << chunk + << " data store return: " << r; } }); } @@ -1093,11 +1099,11 @@ void SyncChunkThread::Init(CopysetNode* node) { } void SyncChunkThread::Run() { - syncThread_ = std::thread([this](){ + syncThread_ = std::thread([this]() { while (running_) { std::unique_lock lock(mtx_); - cond_->wait_for(lock, - std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); + cond_->wait_for( + lock, 
std::chrono::seconds(CopysetNode::syncTriggerSeconds_)); node_->SyncAllChunks(); } }); @@ -1111,9 +1117,7 @@ void SyncChunkThread::Stop() { } } -SyncChunkThread::~SyncChunkThread() { - Stop(); -} +SyncChunkThread::~SyncChunkThread() { Stop(); } } // namespace chunkserver } // namespace curve diff --git a/src/chunkserver/trash.cpp b/src/chunkserver/trash.cpp index 2941261240..f4a8fc7965 100644 --- a/src/chunkserver/trash.cpp +++ b/src/chunkserver/trash.cpp @@ -20,21 +20,24 @@ * Author: lixiaocui */ -#include +#include "src/chunkserver/trash.h" + #include +#include + #include -#include "src/chunkserver/trash.h" -#include "src/common/string_util.h" -#include "src/chunkserver/datastore/filename_operator.h" -#include "src/chunkserver/copyset_node.h" + #include "include/chunkserver/chunkserver_common.h" -#include "src/common/uri_parser.h" +#include "src/chunkserver/copyset_node.h" +#include "src/chunkserver/datastore/filename_operator.h" #include "src/chunkserver/raftlog/define.h" +#include "src/common/string_util.h" +#include "src/common/uri_parser.h" using ::curve::chunkserver::RAFT_DATA_DIR; +using ::curve::chunkserver::RAFT_LOG_DIR; using ::curve::chunkserver::RAFT_META_DIR; using ::curve::chunkserver::RAFT_SNAP_DIR; -using ::curve::chunkserver::RAFT_LOG_DIR; namespace curve { namespace chunkserver { @@ -60,13 +63,13 @@ int Trash::Init(TrashOptions options) { walPool_ = options.walPool; chunkNum_.store(0); - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; localFileSystem_->List(trashPath_, &files); - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -100,8 +103,8 @@ int Trash::Fini() { return 0; } -int Trash::RecycleCopySet(const std::string &dirPath) { - // 回收站目录不存在,需要创建 +int Trash::RecycleCopySet(const std::string& dirPath) { + // The recycle bin 
directory does not exist and needs to be created if (!localFileSystem_->DirExists(trashPath_)) { LOG(INFO) << "Copyset recyler directory " << trashPath_ << " does not exist, creating it"; @@ -113,10 +116,11 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } } - // 如果回收站已存在该目录,本次删除失败 - std::string dst = trashPath_ + "/" + - dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + - '.' + std::to_string(std::time(nullptr)); + // If the directory already exists in the recycle bin, this deletion failed + std::string dst = + trashPath_ + "/" + + dirPath.substr(dirPath.find_last_of('/', dirPath.length()) + 1) + '.' + + std::to_string(std::time(nullptr)); if (localFileSystem_->DirExists(dst)) { LOG(WARNING) << "recycle error: " << dst << " already exist in " << trashPath_; @@ -137,28 +141,28 @@ int Trash::RecycleCopySet(const std::string &dirPath) { } void Trash::DeleteEligibleFileInTrashInterval() { - while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { - // 扫描回收站 - DeleteEligibleFileInTrash(); - } + while (sleeper_.wait_for(std::chrono::seconds(scanPeriodSec_))) { + // Scan Recycle Bin + DeleteEligibleFileInTrash(); + } } void Trash::DeleteEligibleFileInTrash() { - // trash目录暂不存在 + // The trash directory does not currently exist if (!localFileSystem_->DirExists(trashPath_)) { return; } - // 读取trash目录下的所有目录 + // Read all directories under the trash directory std::vector files; if (0 != localFileSystem_->List(trashPath_, &files)) { LOG(ERROR) << "Trash failed list files in " << trashPath_; return; } - // 遍历trash下的文件 - for (auto &file : files) { - // 如果不是copyset目录,跳过 + // Traverse through files under trash + for (auto& file : files) { + // If it is not a copyset directory, skip if (!IsCopysetInTrash(file)) { continue; } @@ -172,7 +176,7 @@ void Trash::DeleteEligibleFileInTrash() { continue; } - // 删除copyset目录 + // Delete copyset directory if (0 != localFileSystem_->Delete(copysetDir)) { LOG(ERROR) << "Trash fail to delete " << copysetDir; 
return; @@ -180,10 +184,10 @@ void Trash::DeleteEligibleFileInTrash() { } } -bool Trash::IsCopysetInTrash(const std::string &dirName) { - // 合法的copyset目录: 高32位PoolId(>0)组成, 低32位由copysetId(>0)组成 - // 目录是十进制形式 - // 例如:2860448220024 (poolId: 666, copysetId: 888) +bool Trash::IsCopysetInTrash(const std::string& dirName) { + // Legal copyset directory: composed of high 32-bit PoolId(>0), and low + // 32-bit composed of copysetId(>0) The directory is in decimal form For + // example: 2860448220024 (poolId: 666, copysetId: 888) uint64_t groupId; auto n = dirName.find("."); if (n == std::string::npos) { @@ -196,7 +200,7 @@ bool Trash::IsCopysetInTrash(const std::string &dirName) { return GetPoolID(groupId) >= 1 && GetCopysetID(groupId) >= 1; } -bool Trash::NeedDelete(const std::string ©setDir) { +bool Trash::NeedDelete(const std::string& copysetDir) { int fd = localFileSystem_->Open(copysetDir, O_RDONLY); if (0 > fd) { LOG(ERROR) << "Trash fail open " << copysetDir; @@ -219,15 +223,15 @@ bool Trash::NeedDelete(const std::string ©setDir) { return true; } -bool Trash::IsChunkOrSnapShotFile(const std::string &chunkName) { +bool Trash::IsChunkOrSnapShotFile(const std::string& chunkName) { return FileNameOperator::FileType::UNKNOWN != - FileNameOperator::ParseFileName(chunkName).type; + FileNameOperator::ParseFileName(chunkName).type; } -bool Trash::RecycleChunksAndWALInDir( - const std::string ©setPath, const std::string &filename) { +bool Trash::RecycleChunksAndWALInDir(const std::string& copysetPath, + const std::string& filename) { bool isDir = localFileSystem_->DirExists(copysetPath); - // 是文件看是否需要回收 + // It's a file, check if recycling is required. 
if (!isDir) { if (IsChunkOrSnapShotFile(filename)) { return RecycleChunkfile(copysetPath, filename); @@ -238,18 +242,18 @@ bool Trash::RecycleChunksAndWALInDir( } } - // 是目录,继续list + // It's a directory, continue with the list std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; return false; } - // 遍历子文件 + // Traverse sub files bool ret = true; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; - // recycle 失败不应该中断其他文件的recycle + // recycle, failure should not interrupt the recycle of other files if (!RecycleChunksAndWALInDir(filePath, file)) { ret = false; } @@ -257,13 +261,13 @@ bool Trash::RecycleChunksAndWALInDir( return ret; } -bool Trash::RecycleChunkfile( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleChunkfile(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (0 != chunkFilePool_->RecycleFile(filepath)) { LOG(ERROR) << "Trash failed recycle chunk " << filepath - << " to FilePool"; + << " to FilePool"; return false; } @@ -271,13 +275,12 @@ bool Trash::RecycleChunkfile( return true; } -bool Trash::RecycleWAL( - const std::string &filepath, const std::string &filename) { +bool Trash::RecycleWAL(const std::string& filepath, + const std::string& filename) { (void)filename; LockGuard lg(mtx_); if (walPool_ != nullptr && 0 != walPool_->RecycleFile(filepath)) { - LOG(ERROR) << "Trash failed recycle WAL " << filepath - << " to WALPool"; + LOG(ERROR) << "Trash failed recycle WAL " << filepath << " to WALPool"; return false; } @@ -285,12 +288,12 @@ bool Trash::RecycleWAL( return true; } -bool Trash::IsWALFile(const std::string &fileName) { +bool Trash::IsWALFile(const std::string& fileName) { int match = 0; int64_t first_index = 0; int64_t last_index = 0; - match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, - &first_index, 
&last_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_CLOSED_PATTERN, &first_index, + &last_index); if (match == 2) { LOG(INFO) << "recycle closed segment wal file, path: " << fileName << " first_index: " << first_index @@ -298,8 +301,7 @@ bool Trash::IsWALFile(const std::string &fileName) { return true; } - match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, - &first_index); + match = sscanf(fileName.c_str(), CURVE_SEGMENT_OPEN_PATTERN, &first_index); if (match == 1) { LOG(INFO) << "recycle open segment wal file, path: " << fileName << " first_index: " << first_index; @@ -308,7 +310,7 @@ bool Trash::IsWALFile(const std::string &fileName) { return false; } -uint32_t Trash::CountChunkNumInCopyset(const std::string ©setPath) { +uint32_t Trash::CountChunkNumInCopyset(const std::string& copysetPath) { std::vector files; if (0 != localFileSystem_->List(copysetPath, &files)) { LOG(ERROR) << "Trash failed to list files in " << copysetPath; @@ -317,15 +319,14 @@ uint32_t Trash::CountChunkNumInCopyset(const std::string ©setPath) { // Traverse subdirectories uint32_t chunkNum = 0; - for (auto &file : files) { + for (auto& file : files) { std::string filePath = copysetPath + "/" + file; bool isDir = localFileSystem_->DirExists(filePath); if (!isDir) { // valid: chunkfile, snapshotfile, walfile - if (!(IsChunkOrSnapShotFile(file) || - IsWALFile(file))) { - LOG(WARNING) << "Trash find a illegal file:" - << file << " in " << copysetPath; + if (!(IsChunkOrSnapShotFile(file) || IsWALFile(file))) { + LOG(WARNING) << "Trash find a illegal file:" << file << " in " + << copysetPath; continue; } ++chunkNum; diff --git a/src/chunkserver/trash.h b/src/chunkserver/trash.h index a3a3c89d53..c6791c52c0 100644 --- a/src/chunkserver/trash.h +++ b/src/chunkserver/trash.h @@ -25,25 +25,27 @@ #include #include -#include "src/fs/local_filesystem.h" + #include "src/chunkserver/datastore/file_pool.h" #include "src/common/concurrent/concurrent.h" #include 
"src/common/interruptible_sleeper.h" +#include "src/fs/local_filesystem.h" -using ::curve::common::Thread; using ::curve::common::Atomic; -using ::curve::common::Mutex; -using ::curve::common::LockGuard; using ::curve::common::InterruptibleSleeper; +using ::curve::common::LockGuard; +using ::curve::common::Mutex; +using ::curve::common::Thread; namespace curve { namespace chunkserver { -struct TrashOptions{ - // copyset的trash路径 +struct TrashOptions { + // The trash path of copyset std::string trashPath; - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 + // The file can be physically recycled after being placed in trash for + // expiredAfteSec seconds int expiredAfterSec; - // 扫描trash目录的时间间隔 + // Time interval for scanning the trash directory int scanPeriodSec; std::shared_ptr localFileSystem; @@ -60,18 +62,19 @@ class Trash { int Fini(); /* - * @brief DeleteEligibleFileInTrash 回收trash目录下的物理空间 - */ + * @brief DeleteEligibleFileInTrash recycles the physical space in the trash + * directory + */ void DeleteEligibleFileInTrash(); - int RecycleCopySet(const std::string &dirPath); + int RecycleCopySet(const std::string& dirPath); /* - * @brief 获取回收站中chunk的个数 - * - * @return chunk个数 - */ - uint32_t GetChunkNum() {return chunkNum_.load();} + * @brief Get the number of chunks in the recycle bin + * + * @return Number of chunks + */ + uint32_t GetChunkNum() { return chunkNum_.load(); } /** * @brief is WAL or not ? 
@@ -94,46 +97,49 @@ class Trash { private: /* - * @brief DeleteEligibleFileInTrashInterval 每隔一段时间进行trash物理空间回收 - */ + * @brief DeleteEligibleFileInTrashInterval Trash physical space recycling + * at regular intervals + */ void DeleteEligibleFileInTrashInterval(); /* - * @brief NeedDelete 文件是否需要删除,放入trash的时间大于 - * trash中expiredAfterSec可以删除 - * - * @param[in] copysetDir copyset的目录路径 - * - * @return true-可以被删除 - */ - bool NeedDelete(const std::string ©setDir); + * @brief NeedDelete Does the file need to be deleted, and the time it takes + * to place the trash is greater than ExpiredAfterSec in trash can be + * deleted + * + * @param[in] copysetDir: copyset directory path + * + * @return true-can be deleted + */ + bool NeedDelete(const std::string& copysetDir); /* - * @brief IsCopysetInTrash 是否为回收站中的copyset的目录 - * - * @param[in] dirName 文目录路径 - * - * @return true-符合copyset目录命名规则 - */ - bool IsCopysetInTrash(const std::string &dirName); + * @brief IsCopysetInTrash Is the directory of the copyset in the recycle + * bin + * + * @param[in] dirName: directory path + * + * @return true-Complies with copyset directory naming rules + */ + bool IsCopysetInTrash(const std::string& dirName); /* - * @brief Recycle Chunkfile and wal file in Copyset - * - * @param[in] copysetDir copyset dir - * @param[in] filename filename - */ - bool RecycleChunksAndWALInDir( - const std::string ©setDir, const std::string &filename); + * @brief Recycle Chunkfile and wal file in Copyset + * + * @param[in] copysetDir: copyset dir + * @param[in] filename: filename + */ + bool RecycleChunksAndWALInDir(const std::string& copysetDir, + const std::string& filename); /* - * @brief Recycle Chunkfile - * - * @param[in] filepath 文件路径 - * @param[in] filename 文件名 - */ - bool RecycleChunkfile( - const std::string &filepath, const std::string &filename); + * @brief Recycle Chunkfile + * + * @param[in] filepath: file path + * @param[in] filename: File name + */ + bool RecycleChunkfile(const std::string& filepath, 
+ const std::string& filename); /** * @brief Recycle WAL @@ -147,41 +153,42 @@ class Trash { bool RecycleWAL(const std::string& filepath, const std::string& filename); /* - * @brief 统计copyset目录中的chunk个数 - * - * @param[in] copysetPath chunk所在目录 - * @return 返回chunk个数 - */ - uint32_t CountChunkNumInCopyset(const std::string ©setPath); + * @brief Counts the number of chunks in the copyset directory + * + * @param[in] copysetPath: Chunk directory + * @return the number of chunks + */ + uint32_t CountChunkNumInCopyset(const std::string& copysetPath); private: - // 文件在放入trash中expiredAfteSec秒后,可以被物理回收 + // The file can be physically recycled after being placed in trash for + // expiredAfterSec seconds int expiredAfterSec_; - // 扫描trash目录的时间间隔 + // Time interval for scanning the trash directory int scanPeriodSec_; - // 回收站中chunk的个数 + // Number of chunks in the Recycle Bin Atomic chunkNum_; Mutex mtx_; - // 本地文件系统 + // Local File System std::shared_ptr localFileSystem_; - // chunk池子 + // chunk Pool std::shared_ptr chunkFilePool_; // wal pool std::shared_ptr walPool_; - // 回收站全路径 + // Recycle Bin Full Path std::string trashPath_; - // 后台清理回收站的线程 + // Thread for background cleaning of the recycle bin Thread recycleThread_; - // false-开始后台任务,true-停止后台任务 + // false-Start background task, true-Stop background task Atomic isStop_; InterruptibleSleeper sleeper_; @@ -190,4 +197,3 @@ class Trash { } // namespace curve #endif // SRC_CHUNKSERVER_TRASH_H_ - diff --git a/src/client/client_common.h b/src/client/client_common.h index 8620f050d1..ab067e8114 100644 --- a/src/client/client_common.h +++ b/src/client/client_common.h @@ -28,8 +28,8 @@ #include #include -#include #include +#include #include "include/client/libcurve.h" #include "src/common/throttle.h" @@ -53,7 +53,7 @@ constexpr uint64_t KiB = 1024; constexpr uint64_t MiB = 1024 * KiB; constexpr uint64_t GiB = 1024 * MiB; -// 操作类型 +// Operation type enum class OpType { READ = 0, WRITE, @@ -67,7 +67,7 @@ enum class OpType { }; /** 
- * 与nameserver.proto中的FileStatus一一对应 + * Corresponds one-to-one with FileStatus in nameserver.proto */ enum class FileStatus { Created = 0, @@ -90,12 +90,10 @@ typedef struct ChunkIDInfo { ChunkIDInfo(ChunkID cid, LogicPoolID lpid, CopysetID cpid) : cid_(cid), cpid_(cpid), lpid_(lpid) {} - bool Valid() const { - return lpid_ > 0 && cpid_ > 0; - } + bool Valid() const { return lpid_ > 0 && cpid_ > 0; } } ChunkIDInfo_t; -// 保存每个chunk对应的版本信息 +// Save the version information corresponding to each chunk typedef struct ChunkInfoDetail { std::vector chunkSn; } ChunkInfoDetail_t; @@ -106,7 +104,8 @@ typedef struct LeaseSession { uint64_t createTime; } LeaseSession_t; -// 保存logicalpool中segment对应的copysetid信息 +// Save the copysetid information corresponding to +// the segment in the logicalpool typedef struct LogicalPoolCopysetIDInfo { LogicPoolID lpid; std::vector cpidVec; @@ -117,7 +116,7 @@ typedef struct LogicalPoolCopysetIDInfo { } } LogicalPoolCopysetIDInfo_t; -// 保存每个segment的基本信息 +// Save basic information for each segment typedef struct SegmentInfo { uint32_t segmentsize; uint32_t chunksize; @@ -147,9 +146,9 @@ typedef struct FInfo { uint64_t length; uint64_t ctime; uint64_t seqnum; - // userinfo是当前操作这个文件的用户信息 + // userinfo is the user information currently operating on this file UserInfo_t userinfo; - // owner是当前文件所属信息 + // owner is the information to which the current file belongs std::string owner; std::string filename; std::string fullPathName; @@ -162,7 +161,7 @@ typedef struct FInfo { uint64_t stripeCount; std::string poolset; - OpenFlags openflags; + OpenFlags openflags; common::ReadWriteThrottleParams throttleParams; FInfo() { @@ -187,10 +186,10 @@ typedef struct FileEpoch { } } FileEpoch_t; -// PeerAddr 代表一个copyset group里的一个chunkserver节点 -// 与braft中的PeerID对应 +// PeerAddr represents a chunkserver node in a copyset group +// Corresponds to PeerID in braft struct PeerAddr { - // 节点的地址信息 + // Address information of nodes EndPoint addr_; PeerAddr() = default; @@ 
-198,17 +197,17 @@ struct PeerAddr { bool IsEmpty() const { return (addr_.ip == butil::IP_ANY && addr_.port == 0) && - addr_.socket_file.empty(); + addr_.socket_file.empty(); } - // 重置当前地址信息 + // Reset current address information void Reset() { addr_.ip = butil::IP_ANY; addr_.port = 0; } - // 从字符串中将地址信息解析出来 - int Parse(const std::string &str) { + // Parse address information from a string + int Parse(const std::string& str) { int idx; char ip_str[64]; if (2 > sscanf(str.c_str(), "%[^:]%*[:]%d%*[:]%d", ip_str, &addr_.port, @@ -224,8 +223,9 @@ struct PeerAddr { return 0; } - // 将该节点地址信息转化为字符串形式 - // 在get leader调用中可以将该值直接传入request + // Convert the node address information into a string format. + // In the get leader call, this value can be directly passed + // into the request. std::string ToString() const { char str[128]; snprintf(str, sizeof(str), "%s:%d", butil::endpoint2str(addr_).c_str(), @@ -233,32 +233,32 @@ struct PeerAddr { return std::string(str); } - bool operator==(const PeerAddr &other) const { + bool operator==(const PeerAddr& other) const { return addr_ == other.addr_; } }; -inline const char *OpTypeToString(OpType optype) { +inline const char* OpTypeToString(OpType optype) { switch (optype) { - case OpType::READ: - return "Read"; - case OpType::WRITE: - return "Write"; - case OpType::READ_SNAP: - return "ReadSnapshot"; - case OpType::DELETE_SNAP: - return "DeleteSnapshot"; - case OpType::CREATE_CLONE: - return "CreateCloneChunk"; - case OpType::RECOVER_CHUNK: - return "RecoverChunk"; - case OpType::GET_CHUNK_INFO: - return "GetChunkInfo"; - case OpType::DISCARD: - return "Discard"; - case OpType::UNKNOWN: - default: - return "Unknown"; + case OpType::READ: + return "Read"; + case OpType::WRITE: + return "Write"; + case OpType::READ_SNAP: + return "ReadSnapshot"; + case OpType::DELETE_SNAP: + return "DeleteSnapshot"; + case OpType::CREATE_CLONE: + return "CreateCloneChunk"; + case OpType::RECOVER_CHUNK: + return "RecoverChunk"; + case 
OpType::GET_CHUNK_INFO: + return "GetChunkInfo"; + case OpType::DISCARD: + return "Discard"; + case OpType::UNKNOWN: + default: + return "Unknown"; } } @@ -279,16 +279,14 @@ class SnapCloneClosure : public google::protobuf::Closure { class ClientDummyServerInfo { public: - static ClientDummyServerInfo &GetInstance() { + static ClientDummyServerInfo& GetInstance() { static ClientDummyServerInfo clientInfo; return clientInfo; } - void SetIP(const std::string &ip) { localIP_ = ip; } + void SetIP(const std::string& ip) { localIP_ = ip; } - std::string GetIP() const { - return localIP_; - } + std::string GetIP() const { return localIP_; } void SetPort(uint32_t port) { localPort_ = port; } @@ -309,22 +307,22 @@ class ClientDummyServerInfo { inline void TrivialDeleter(void*) {} -inline const char *FileStatusToName(FileStatus status) { +inline const char* FileStatusToName(FileStatus status) { switch (status) { - case FileStatus::Created: - return "Created"; - case FileStatus::Deleting: - return "Deleting"; - case FileStatus::Cloning: - return "Cloning"; - case FileStatus::CloneMetaInstalled: - return "CloneMetaInstalled"; - case FileStatus::Cloned: - return "Cloned"; - case FileStatus::BeingCloned: - return "BeingCloned"; - default: - return "Unknown"; + case FileStatus::Created: + return "Created"; + case FileStatus::Deleting: + return "Deleting"; + case FileStatus::Cloning: + return "Cloning"; + case FileStatus::CloneMetaInstalled: + return "CloneMetaInstalled"; + case FileStatus::Cloned: + return "Cloned"; + case FileStatus::BeingCloned: + return "BeingCloned"; + default: + return "Unknown"; } } @@ -359,7 +357,7 @@ struct CreateFileContext { std::string poolset; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CLIENT_COMMON_H_ diff --git a/src/client/client_metric.h b/src/client/client_metric.h index 826b8b9b2d..a2b48f5a16 100644 --- a/src/client/client_metric.h +++ b/src/client/client_metric.h @@ -28,9 
+28,9 @@ #include #include -#include "src/common/timeutility.h" #include "src/client/client_common.h" #include "src/common/string_util.h" +#include "src/common/timeutility.h" using curve::common::TimeUtility; @@ -48,11 +48,11 @@ struct SlowRequestMetric { : count(prefix, name + "_total") {} }; -// 秒级信息统计 +// Second-level information statistics struct PerSecondMetric { - // 当前persecond计数总数 + // Current total number of second counts bvar::Adder count; - // persecond真实数据,这个数据依赖于count + // persecond real data depends on the count bvar::PerSecond> value; PerSecondMetric(const std::string& prefix, const std::string& name) @@ -60,21 +60,21 @@ struct PerSecondMetric { value(prefix, name, &count, 1) {} }; -// 接口统计信息metric信息统计 +// Interface statistics information metric information statistics struct InterfaceMetric { - // 接口统计信息调用qps + // Call qps for interface statistics information PerSecondMetric qps; // error request persecond PerSecondMetric eps; // receive request persecond PerSecondMetric rps; - // 调用吞吐 + // Call throughput PerSecondMetric bps; - // 调用超时次数qps + // Call timeout count qps PerSecondMetric timeoutQps; - // 调用redirect次数qps + // Number of calls to redirect qps PerSecondMetric redirectQps; - // 调用latency + // Call latency bvar::LatencyRecorder latency; InterfaceMetric(const std::string& prefix, const std::string& name) @@ -102,33 +102,36 @@ struct DiscardMetric { bvar::Adder pending; }; -// 文件级别metric信息统计 +// File level metric information statistics struct FileMetric { const std::string prefix = "curve_client"; - // 当前metric归属于哪个文件 + // Which file does the current metric belong to std::string filename; - // 当前文件inflight io数量 + // Current file inflight io quantity bvar::Adder inflightRPCNum; - // 当前文件请求的最大请求字节数,这种统计方式可以很方便的看到最大值,分位值 + // The maximum number of request bytes for the current file request, which + // is a convenient statistical method to see the maximum and quantile values bvar::LatencyRecorder readSizeRecorder; bvar::LatencyRecorder 
writeSizeRecorder; bvar::LatencyRecorder discardSizeRecorder; - // libcurve最底层read rpc接口统计信息metric统计 + // Libcurve's lowest level read rpc interface statistics information metric + // statistics InterfaceMetric readRPC; - // libcurve最底层write rpc接口统计信息metric统计 + // Libcurve's lowest level write rpc interface statistics information metric + // statistics InterfaceMetric writeRPC; - // 用户读请求qps、eps、rps + // User Read Request QPS, EPS, RPS InterfaceMetric userRead; - // 用户写请求qps、eps、rps + // User write request QPS, EPS, RPS InterfaceMetric userWrite; // user's discard request InterfaceMetric userDiscard; - // get leader失败重试qps + // Get leader failed and retry qps PerSecondMetric getLeaderRetryQPS; // Number of slow requests @@ -153,52 +156,52 @@ struct FileMetric { discardMetric(prefix + filename) {} }; -// 用于全局mds接口统计信息调用信息统计 +// Used for global mds interface statistics, call information statistics struct MDSClientMetric { std::string prefix; - // mds的地址信息 + // Address information of mds std::string metaserverAddr; bvar::PassiveStatus metaserverAddress; - // openfile接口统计信息 + // Openfile interface statistics InterfaceMetric openFile; - // createFile接口统计信息 + // CreateFile interface statistics InterfaceMetric createFile; - // closeFile接口统计信息 + // CloseFile interface statistics InterfaceMetric closeFile; - // getFileInfo接口统计信息 + // GetFileInfo interface statistics InterfaceMetric getFile; - // RefreshSession接口统计信息 + // RefreshSession Interface Statistics InterfaceMetric refreshSession; - // GetServerList接口统计信息 + // GetServerList interface statistics InterfaceMetric getServerList; - // GetOrAllocateSegment接口统计信息 + // GetOrAllocateSegment interface statistics InterfaceMetric getOrAllocateSegment; - // DeAllocateSegment接口统计信息 + // DeAllocateSegment Interface Statistics InterfaceMetric deAllocateSegment; - // RenameFile接口统计信息 + // RenameFile Interface Statistics InterfaceMetric renameFile; - // Extend接口统计信息 + // Extend Interface Statistics InterfaceMetric extendFile; - // 
DeleteFile接口统计信息 + // DeleteFile interface statistics InterfaceMetric deleteFile; // RecoverFile interface metric InterfaceMetric recoverFile; - // changeowner接口统计信息 + // Changeowner Interface Statistics InterfaceMetric changeOwner; - // listdir接口统计信息 + // Listdir interface statistics InterfaceMetric listDir; - // register接口统计信息 + // Register Interface Statistics InterfaceMetric registerClient; - // GetChunkServerID接口统计 + // GetChunkServerID interface statistics InterfaceMetric getChunkServerId; - // ListChunkServerInServer接口统计 + // ListChunkServerInServer Interface Statistics InterfaceMetric listChunkserverInServer; // IncreaseEpoch InterfaceMetric increaseEpoch; - // 切换mds server总次数 + // Total number of switching MDS server bvar::Adder mdsServerChangeTimes; explicit MDSClientMetric(const std::string& prefix_ = "") @@ -245,8 +248,8 @@ struct LatencyGuard { class MetricHelper { public: /** - * 统计getleader重试次数 - * @param: fm为当前文件的metric指针 + * Count the number of retries for getleader + * @param: fm is the metric pointer of the current file */ static void IncremGetLeaderRetryTime(FileMetric* fm) { if (fm != nullptr) { @@ -255,13 +258,13 @@ class MetricHelper { } /** - * 统计用户当前读写请求次数,用于qps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of read and write requests from users for QPS + * calculation + * @param: fm: The metric pointer of the current file + * @param: length: The current request size + * @param: read: whether the current operation is a read or write operation */ - static void IncremUserQPSCount(FileMetric* fm, - uint64_t length, + static void IncremUserQPSCount(FileMetric* fm, uint64_t length, OpType type) { if (fm != nullptr) { switch (type) { @@ -286,9 +289,10 @@ class MetricHelper { } /** - * 统计用户当前读写请求失败次数,用于eps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of failed read/write requests by users for EPS + * calculation + * @param: fm: The 
metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremUserEPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -308,13 +312,19 @@ class MetricHelper { } /** - * 统计用户当前接收到的读写请求次数,用于rps计算 - * rps: receive request persecond, 就是当前接口每秒接收到的请求数量 - * qps: query request persecond, 就是当前接口每秒处理的请求数量 - * eps: error request persecond, 就是当前接口每秒出错的请求数量 - * rps减去qps就是当前client端每秒钟等待的请求数量,这部分请求会持久占用当前一秒内的内存 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the number of read and write requests currently received by the + * user for RPS calculation. + * rps: receive request persecond, which is the + * number of requests received by the current interface per second. + * qps:query request persecond, which is the number of requests processed by + * the current interface per second. + * eps: error request persecond, which is the number of requests that make + * errors per second on the current interface. + * rps minus qps is the number of requests that the current client is + * waiting for per second, which will persistently occupy the current memory + * for one second. 
+ * @param: fm: The metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremUserRPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -334,9 +344,9 @@ class MetricHelper { } /** - * 统计当前rpc失败次数,用于eps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Count the current number of RPC failures for EPS calculation + * @param: fm: The metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremFailRPCCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -354,9 +364,10 @@ class MetricHelper { } /** - * 统计用户当前读写请求超时次数,用于timeoutQps计算 - * @param: fm为当前文件的metric指针 - * @param: read为当前操作是读操作还是写操作 + * Counts the number of times a user's current read/write request has timed + * out, used for timeoutQps calculation + * @param: fm: The metric pointer of the current file + * @param: read: whether the current operation is a read or write operation */ static void IncremTimeOutRPCCount(FileMetric* fm, OpType type) { if (fm != nullptr) { @@ -374,9 +385,9 @@ class MetricHelper { } /** - * 统计请求被redirect的次数 - * @param fileMetric 当前文件的metric指针 - * @param opType 请求类型 + * Count the number of times requests have been redirected + * @param fileMetric: The metric pointer of the current file + * @param opType: request type */ static void IncremRedirectRPCCount(FileMetric* fileMetric, OpType opType) { if (fileMetric) { @@ -394,13 +405,13 @@ class MetricHelper { } /** - * 统计读写RPC接口统计信息请求次数及带宽统计,用于qps及bps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Statistics of the number of requests and bandwidth for reading and + * writing RPC interfaces, used for QPS and bps calculations + * @param: fm: The metric pointer of the current file + * @param: length: The current request size + * @param: read: whether the current operation is a read or write operation */ - static void 
IncremRPCQPSCount(FileMetric* fm, - uint64_t length, + static void IncremRPCQPSCount(FileMetric* fm, uint64_t length, OpType type) { if (fm != nullptr) { switch (type) { @@ -419,13 +430,13 @@ class MetricHelper { } /** - * 统计读写RPC接口统计信息请求次数及带宽统计,用于rps计算 - * @param: fm为当前文件的metric指针 - * @param: length为当前请求大小 - * @param: read为当前操作是读操作还是写操作 + * Statistics of the number of requests and bandwidth for reading and writing + * RPC interfaces, used for RPS calculations + * @param: fm: The metric pointer of the current file + * @param: length: The current request size + * @param: read: whether the current operation is a read or write operation */ - static void IncremRPCRPSCount(FileMetric* fm, - OpType type) { + static void IncremRPCRPSCount(FileMetric* fm, OpType type) { if (fm != nullptr) { switch (type) { case OpType::READ: @@ -440,9 +451,7 @@ class MetricHelper { } } - static void LatencyRecord(FileMetric* fm, - uint64_t duration, - OpType type) { + static void LatencyRecord(FileMetric* fm, uint64_t duration, OpType type) { if (fm != nullptr) { switch (type) { case OpType::READ: @@ -457,8 +466,7 @@ class MetricHelper { } } - static void UserLatencyRecord(FileMetric* fm, - uint64_t duration, + static void UserLatencyRecord(FileMetric* fm, uint64_t duration, OpType type) { if (fm != nullptr) { switch (type) { @@ -502,7 +510,7 @@ class MetricHelper { } } }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_CLIENT_METRIC_H_ diff --git a/src/client/splitor.h b/src/client/splitor.h index eaffa27a62..4dd95ddded 100644 --- a/src/client/splitor.h +++ b/src/client/splitor.h @@ -46,53 +46,51 @@ class Splitor { static void Init(const IOSplitOption& ioSplitOpt); /** - * 用户IO拆分成Chunk级别的IO - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * 
@param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: FileEpoch_t file epoch info + * Split user IO into Chunk level IO + * @param: iotracker: Big IO Context Information + * @param: metaCache: The cache information that needs to be used during + * the IO splitting process + * @param: targetlist: The storage list of small IO after the large IO is + * split + * @param: data: The data to be written + * @param: offset: The actual offset of IO issued by the user + * @param: length: Data length + * @param: mdsclient: Searches for information through mdsclient when + * searching for metacache fails + * @param: fi: stores some basic information about the current IO, such as + * chunksize, etc + * @param: FileEpoch_t: file epoch information */ - static int IO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - size_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch); + static int IO2ChunkRequests(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, size_t length, + MDSClient* mdsclient, const FInfo_t* fi, + const FileEpoch_t* fEpoch); /** - * 对单ChunkIO进行细粒度拆分 - * @param: iotracker大IO上下文信息 - * @param: metaCache是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: cid是当前chunk的ID信息 - * @param: data是待写的数据 - * @param: offset是当前chunk内的偏移 - * @param: length数据长度 - * @param: seq是当前chunk的版本号 + * Fine grained splitting of single ChunkIO + * @param: iotracker: Big IO Context Information + * @param: metaCache: The cache information that needs to be used during + * the IO splitting process + * @param: targetlist: The storage list of small IO after the large IO is + * split + * @param: cid: The ID information of the current chunk + * @param: data: The data to be written + * @param: offset: The offset within the current chunk + * @param: length: Data length + * @param: seq: The version number of the current chunk */ - 
static int SingleChunkIO2ChunkRequests(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - const ChunkIDInfo& cid, - butil::IOBuf* data, - off_t offset, - size_t length, - uint64_t seq); + static int SingleChunkIO2ChunkRequests( + IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, const ChunkIDInfo& cid, + butil::IOBuf* data, off_t offset, size_t length, uint64_t seq); /** - * @brief 计算请求的location信息 - * @param ioTracker io上下文信息 - * @param metaCache 文件缓存信息 - * @param chunkIdx 当前chunk信息 - * @return source信息 + * @brief Calculates the location information of the request + * @param ioTracker: IO Context Information + * @param metaCache: File cache information + * @param chunkIdx: Current chunk information + * @return source information */ static RequestSourceInfo CalcRequestSourceInfo(IOTracker* ioTracker, MetaCache* metaCache, @@ -105,34 +103,33 @@ class Splitor { private: /** - * IO2ChunkRequests内部会调用这个函数,进行真正的拆分操作 - * @param: iotracker大IO上下文信息 - * @param: mc是io拆分过程中需要使用的缓存信息 - * @param: targetlist大IO被拆分之后的小IO存储列表 - * @param: data 是待写的数据 - * @param: offset用户下发IO的其实偏移 - * @param: length数据长度 - * @param: mdsclient在查找metacahe失败时,通过mdsclient查找信息 - * @param: fi存储当前IO的一些基本信息,比如chunksize等 - * @param: chunkidx是当前chunk在vdisk中的索引值 + * IO2ChunkRequests will internally call this function for actual splitting + * operations + * @param: iotracker: Big IO Context Information + * @param: mc: The cache information that needs to be used during IO + * splitting process + * @param: targetlist: The storage list of small IO after the large IO is + * split + * @param: data: The data to be written + * @param: offset: The actual offset of IO issued by the user + * @param: length: Data length + * @param: mdsclient: Searches for information through mdsclient when + * searching for metacache fails + * @param: fi: Stores some basic information about the current IO, such as + * chunksize, etc + * @param: chunkidx: The index value of the current chunk in the 
vdisk */ - static bool AssignInternal(IOTracker* iotracker, - MetaCache* metaCache, - std::vector* targetlist, - butil::IOBuf* data, - off_t offset, - uint64_t length, - MDSClient* mdsclient, - const FInfo_t* fi, - const FileEpoch_t* fEpoch, - ChunkIndex chunkidx); - - static bool GetOrAllocateSegment(bool allocateIfNotExist, - uint64_t offset, - MDSClient* mdsClient, - MetaCache* metaCache, + static bool AssignInternal(IOTracker* iotracker, MetaCache* metaCache, + std::vector* targetlist, + butil::IOBuf* data, off_t offset, + uint64_t length, MDSClient* mdsclient, + const FInfo_t* fi, const FileEpoch_t* fEpoch, + ChunkIndex chunkidx); + + static bool GetOrAllocateSegment(bool allocateIfNotExist, uint64_t offset, + MDSClient* mdsClient, MetaCache* metaCache, const FInfo* fileInfo, - const FileEpoch_t *fEpoch, + const FileEpoch_t* fEpoch, ChunkIndex chunkidx); static int SplitForNormal(IOTracker* iotracker, MetaCache* metaCache, @@ -149,14 +146,13 @@ class Splitor { static bool MarkDiscardBitmap(IOTracker* iotracker, FileSegment* fileSegment, - SegmentIndex segmentIndex, - uint64_t offset, + SegmentIndex segmentIndex, uint64_t offset, uint64_t len); private: - // IO拆分模块所使用的配置信息 + // Configuration information used for IO split modules static IOSplitOption iosplitopt_; }; -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve #endif // SRC_CLIENT_SPLITOR_H_ diff --git a/src/client/unstable_helper.cpp b/src/client/unstable_helper.cpp index 5cc99945fe..ae330b1294 100644 --- a/src/client/unstable_helper.cpp +++ b/src/client/unstable_helper.cpp @@ -24,13 +24,13 @@ namespace curve { namespace client { -UnstableState -UnstableHelper::GetCurrentUnstableState(ChunkServerID csId, - const butil::EndPoint &csEndPoint) { +UnstableState UnstableHelper::GetCurrentUnstableState( + ChunkServerID csId, const butil::EndPoint& csEndPoint) { std::string ip = butil::ip2str(csEndPoint.ip).c_str(); mtx_.lock(); - // 如果当前ip已经超过阈值,则直接返回chunkserver unstable + 
// If the current IP has exceeded the threshold, it will directly return + // chunkserver unstable uint32_t unstabled = serverUnstabledChunkservers_[ip].size(); if (unstabled >= option_.serverUnstableThreshold) { serverUnstabledChunkservers_[ip].emplace(csId); diff --git a/src/client/unstable_helper.h b/src/client/unstable_helper.h index 89cc22be8e..ba23343501 100644 --- a/src/client/unstable_helper.h +++ b/src/client/unstable_helper.h @@ -35,20 +35,17 @@ namespace curve { namespace client { -enum class UnstableState { - NoUnstable, - ChunkServerUnstable, - ServerUnstable -}; - -// 如果chunkserver宕机或者网络不可达, 发往对应chunkserver的rpc会超时 -// 返回之后, 回去refresh leader然后再去发送请求 -// 这种情况下不同copyset上的请求,总会先rpc timedout然后重新refresh leader -// 为了避免一次多余的rpc timedout -// 记录一下发往同一个chunkserver上超时请求的次数 -// 如果超过一定的阈值,会发送http请求检查chunkserver是否健康 -// 如果不健康,则通知所有leader在这台chunkserver上的copyset -// 主动去refresh leader,而不是根据缓存的leader信息直接发送rpc +enum class UnstableState { NoUnstable, ChunkServerUnstable, ServerUnstable }; + +// If the chunkserver goes down or the network is unreachable, the RPC sent to +// the corresponding chunkserver will time out. After returning, go back to the +// refresh leader and then send the request. In this case, requests on different +// copysets will always first RPC timeout and then refresh the leader again. To +// avoid a redundant RPC timeout, record the number of timeout requests sent to +// the same chunkserver. If the threshold is exceeded, an HTTP request will be +// sent to check if the chunkserver is healthy. If not healthy, notify all +// leaders of the copyset on this chunkserver. Actively refresh the leader +// instead of directly sending RPC based on cached leader information. 
class UnstableHelper { public: UnstableHelper() = default; @@ -56,9 +53,7 @@ class UnstableHelper { UnstableHelper(const UnstableHelper&) = delete; UnstableHelper& operator=(const UnstableHelper&) = delete; - void Init(const ChunkServerUnstableOption& opt) { - option_ = opt; - } + void Init(const ChunkServerUnstableOption& opt) { option_ = opt; } void IncreTimeout(ChunkServerID csId) { std::unique_lock guard(mtx_); @@ -78,10 +73,10 @@ class UnstableHelper { private: /** - * @brief 检查chunkserver状态 + * @brief Check chunkserver status * - * @param: endPoint chunkserver的ip:port地址 - * @return: true 健康 / false 不健康 + * @param: endPoint: ip:port address of endPoint chunkserver + * @return: true healthy/ false unhealthy */ bool CheckChunkServerHealth(const butil::EndPoint& endPoint) const { return ServiceHelper::CheckChunkServerHealth( @@ -92,10 +87,10 @@ class UnstableHelper { bthread::Mutex mtx_; - // 同一chunkserver连续超时请求次数 + // Number of consecutive timeout requests for the same chunkserver std::unordered_map timeoutTimes_; - // 同一server上unstable chunkserver的id + // The ID of an unstable chunkserver on the same server std::unordered_map> serverUnstabledChunkservers_; }; diff --git a/src/common/stringstatus.h b/src/common/stringstatus.h index 203b851bfc..9883f0d3ee 100644 --- a/src/common/stringstatus.h +++ b/src/common/stringstatus.h @@ -20,28 +20,28 @@ * Author: lixiaocui */ - -#ifndef SRC_COMMON_STRINGSTATUS_H_ -#define SRC_COMMON_STRINGSTATUS_H_ +#ifndef SRC_COMMON_STRINGSTATUS_H_ +#define SRC_COMMON_STRINGSTATUS_H_ #include -#include + #include +#include namespace curve { namespace common { class StringStatus { public: /** - * @brief ExposeAs 用于初始化bvar + * @brief ExposeAs: Used to initialize bvar * - * @param[in] prefix, 前缀 - * @param[in] name, 名字 + * @param[in] prefix: Prefix + * @param[in] name: Name */ - void ExposeAs(const std::string &prefix, const std::string &name); + void ExposeAs(const std::string& prefix, const std::string& name); /** - * @brief Set 
设置每项key-value信息 + * @brief Set: sets the key-value information for each item * * @param[in] key * @param[in] value @@ -49,30 +49,31 @@ class StringStatus { void Set(const std::string& key, const std::string& value); /** - * @brief Update 把当前key-value map中的键值对以json string的形式设置到status中 //NOLINT + * @brief Update: Sets the key-value pairs in the current // NOLINT + * key-value map to status as JSON strings // NOLINT */ void Update(); /** - * @brief GetValueByKey 获取指定key对应的value + * @brief GetValueByKey: Get the value corresponding to the specified key * - * @param[in] key 指定key + * @param[in] key: Specify the key */ - std::string GetValueByKey(const std::string &key); + std::string GetValueByKey(const std::string& key); /** - * @brief JsonBody 获取当前key-value map对应的json形式字符串 + * @brief JsonBody: obtains the JSON format string corresponding to the + * current key-value map */ std::string JsonBody(); private: - // 需要导出的结构体的key-value map + // The key-value map of the structure to be exported std::map kvs_; - // 该导出项对应的status + // The status corresponding to the exported item bvar::Status status_; }; } // namespace common } // namespace curve #endif // SRC_COMMON_STRINGSTATUS_H_ - diff --git a/src/common/timeutility.h b/src/common/timeutility.h index 1ba3483d34..d3fc2d244c 100644 --- a/src/common/timeutility.h +++ b/src/common/timeutility.h @@ -25,9 +25,10 @@ #include #include #include + +#include #include #include -#include namespace curve { namespace common { @@ -57,7 +58,8 @@ class TimeUtility { return localtime(&now)->tm_hour; } - // 时间戳转成标准时间输出在standard里面,时间戳单位为秒 + // Convert the timestamp to standard time and output it in standard, with + // the timestamp unit in seconds static inline void TimeStampToStandard(time_t timeStamp, std::string* standard) { char now[64]; @@ -67,7 +69,7 @@ class TimeUtility { *standard = std::string(now); } - // 时间戳转成标准时间并返回,时间戳单位为秒 + // The timestamp is converted to standard time and returned in seconds static inline std::string 
TimeStampToStandard(time_t timeStamp) { char now[64]; struct tm p; @@ -85,13 +87,9 @@ class ExpiredTime { public: ExpiredTime() : startUs_(TimeUtility::GetTimeofDayUs()) {} - double ExpiredSec() const { - return ExpiredUs() / 1000000; - } + double ExpiredSec() const { return ExpiredUs() / 1000000; } - double ExpiredMs() const { - return ExpiredUs() / 1000; - } + double ExpiredMs() const { return ExpiredUs() / 1000; } double ExpiredUs() const { return TimeUtility::GetTimeofDayUs() - startUs_; @@ -101,7 +99,7 @@ class ExpiredTime { uint64_t startUs_; }; -} // namespace common -} // namespace curve +} // namespace common +} // namespace curve -#endif // SRC_COMMON_TIMEUTILITY_H_ +#endif // SRC_COMMON_TIMEUTILITY_H_ diff --git a/src/common/uuid.h b/src/common/uuid.h index 8fbc41f61c..a68f865fcf 100644 --- a/src/common/uuid.h +++ b/src/common/uuid.h @@ -27,26 +27,29 @@ extern "C" { void uuid_generate(uuid_t out); void uuid_generate_random(uuid_t out); void uuid_generate_time(uuid_t out); -// 指明由uuid_generate_time生成的uuid是否使用了时间同步机制,不进行封装。 +// Indicate whether the UUID generated by uuid_generate_time utilizes a time +// synchronization mechanism without encapsulation int uuid_generate_time_safe(uuid_t out); } #include #define BUFF_LEN 36 namespace curve { namespace common { -// 生成uuid的生成器 +// Generator for generating uuid class UUIDGenerator { public: UUIDGenerator() {} /** - * @brief 生成uuid,优先采用的算法 - * 如果存在一个高质量的随机数生成器(/dev/urandom), - * UUID将基于其生成的随机数产生。 - * 备用算法:在高质量的随机数生成器不可用的情况下,如果可以获取到MAC地址, - * 则将利用由随机数生成器产生的随机数、当前时间、MAC地址生成UUID。 + * @brief Generate a UUID with a prioritized algorithm selection + * If a high-quality random number generator (/dev/urandom) is available, + * the UUID will be generated based on the random numbers it generates. 
+ * Backup algorithm: If a high-quality random number generator is not + * available and if the MAC address can be obtained, the UUID will be + * generated using a combination of random numbers, current time, and the + * MAC address. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUID() { uuid_t out; @@ -58,11 +61,14 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 使用全局时钟、MAC地址。有MAC地址泄露风险。为了保证唯一性还使用的时间同步机制, - * 如果,时间同步机制不可用,多台机器上生成的uuid可能会重复。 + * @brief Generate a UUID for the specified purpose. + * It utilizes the global clock and MAC address, but there is a risk of + * MAC address leakage. To ensure uniqueness, it also employs a time + * synchronization mechanism. However, if the time synchronization + * mechanism is not available, there is a possibility of UUID duplication + * when generated on multiple machines. * @param : - * @return 生成的uuid + * @return The generated UUID */ std::string GenerateUUIDTime() { uuid_t out; @@ -74,10 +80,11 @@ class UUIDGenerator { } /** - * @brief 生成uuid - * 强制完全使用随机数,优先使用(/dev/urandom),备用(伪随机数生成器)。 - * 在使用伪随机数生成器的情况下,uuid有重复的风险。 - * @return 生成的uuid + * @brief Generate a UUID + * Forcefully utilize random numbers, with a preference for (/dev/urandom) + * and a fallback to pseudo-random number generation. When using the + * pseudo-random number generator, there is a risk of UUID duplication. 
+ * @return The generated UUID */ std::string GenerateUUIDRandom() { uuid_t out; diff --git a/src/common/wait_interval.h b/src/common/wait_interval.h index 69c82143c2..a6f64d0c0e 100644 --- a/src/common/wait_interval.h +++ b/src/common/wait_interval.h @@ -20,36 +20,37 @@ * Author: lixiaocui */ -#ifndef SRC_COMMON_WAIT_INTERVAL_H_ -#define SRC_COMMON_WAIT_INTERVAL_H_ +#ifndef SRC_COMMON_WAIT_INTERVAL_H_ +#define SRC_COMMON_WAIT_INTERVAL_H_ #include "src/common/interruptible_sleeper.h" namespace curve { namespace common { -class WaitInterval { +class WaitInterval { public: /** - * Init 初始化任务的执行间隔 + * Init: Execution interval of initialization task * - * @param[in] intervalMs 执行间隔单位是ms + * @param[in] intervalMs: The execution interval unit is ms */ void Init(uint64_t intervalMs); /** - * WaitForNextExcution 根据最近一次的执行时间点和周期确定需要等待多久之后再执行 + * WaitForNextExcution: Determines how long to wait before executing based + * on the latest execution time and cycle */ void WaitForNextExcution(); /** - * StopWait 退出sleep等待 + * StopWait: Exit Sleep Wait */ void StopWait(); private: - // 最近一次的执行时间 + // Last execution time uint64_t lastSend_; - // 任务的执行周期 + // Task execution cycle uint64_t intevalMs_; InterruptibleSleeper sleeper_; diff --git a/src/mds/nameserver2/clean_task.h b/src/mds/nameserver2/clean_task.h index 9001312870..c865ff6271 100644 --- a/src/mds/nameserver2/clean_task.h +++ b/src/mds/nameserver2/clean_task.h @@ -23,24 +23,26 @@ #ifndef SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ #define SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#include //NOLINT +#include //NOLINT + #include #include //NOLINT #include -#include //NOLINT -#include //NOLINT + #include "proto/nameserver2.pb.h" -#include "src/mds/nameserver2/task_progress.h" -#include "src/mds/nameserver2/clean_core.h" -#include "src/mds/nameserver2/async_delete_snapshot_entity.h" -#include "src/common/concurrent/dlock.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/concurrent/dlock.h" +#include 
"src/mds/nameserver2/async_delete_snapshot_entity.h" +#include "src/mds/nameserver2/clean_core.h" +#include "src/mds/nameserver2/task_progress.h" using curve::common::DLock; namespace curve { namespace mds { -typedef uint64_t TaskIDType; +typedef uint64_t TaskIDType; // default clean task retry times const uint32_t kDefaultTaskRetryTimes = 5; @@ -52,56 +54,40 @@ class Task { virtual void Run(void) = 0; std::function Closure() { - return [this] () { - Run(); - }; + return [this]() { Run(); }; } - TaskProgress GetTaskProgress(void) const { - return progress_; - } + TaskProgress GetTaskProgress(void) const { return progress_; } - void SetTaskProgress(TaskProgress progress) { - progress_ = progress; - } + void SetTaskProgress(TaskProgress progress) { progress_ = progress; } - TaskProgress* GetMutableTaskProgress(void) { - return &progress_; - } + TaskProgress* GetMutableTaskProgress(void) { return &progress_; } - void SetTaskID(TaskIDType taskID) { - taskID_ = taskID; - } + void SetTaskID(TaskIDType taskID) { taskID_ = taskID; } - TaskIDType GetTaskID(void) const { - return taskID_; - } + TaskIDType GetTaskID(void) const { return taskID_; } - void SetRetryTimes(uint32_t retry) { - retry_ = retry; - } + void SetRetryTimes(uint32_t retry) { retry_ = retry; } void Retry() { retry_--; progress_ = TaskProgress(); } - bool RetryTimesExceed() { - return retry_ == 0; - } + bool RetryTimesExceed() { return retry_ == 0; } protected: TaskIDType taskID_; TaskProgress progress_; - // 任务最大重试次数 + // Maximum number of task retries uint32_t retry_; }; -class SnapShotCleanTask: public Task { +class SnapShotCleanTask : public Task { public: - SnapShotCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo, - std::shared_ptr entity = nullptr) { + SnapShotCleanTask( + TaskIDType taskID, std::shared_ptr core, FileInfo fileInfo, + std::shared_ptr entity = nullptr) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -110,29 +96,29 @@ class 
SnapShotCleanTask: public Task { SetRetryTimes(kDefaultTaskRetryTimes); } void Run(void) override { - StatusCode ret = cleanCore_->CleanSnapShotFile(fileInfo_, - GetMutableTaskProgress()); + StatusCode ret = + cleanCore_->CleanSnapShotFile(fileInfo_, GetMutableTaskProgress()); if (asyncEntity_ != nullptr) { brpc::ClosureGuard doneGuard(asyncEntity_->GetClosure()); brpc::Controller* cntl = static_cast(asyncEntity_->GetController()); - DeleteSnapShotResponse *response = - asyncEntity_->GetDeleteResponse(); - const DeleteSnapShotRequest *request = - asyncEntity_->GetDeleteRequest(); + DeleteSnapShotResponse* response = + asyncEntity_->GetDeleteResponse(); + const DeleteSnapShotRequest* request = + asyncEntity_->GetDeleteRequest(); response->set_statuscode(ret); if (ret != StatusCode::kOK) { LOG(ERROR) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile fail, filename = " - << request->filename() - << ", sequencenum = " << request->seq() - << ", statusCode = " << ret; + << ", CleanSnapShotFile fail, filename = " + << request->filename() + << ", sequencenum = " << request->seq() + << ", statusCode = " << ret; } else { LOG(INFO) << "logid = " << cntl->log_id() - << ", CleanSnapShotFile ok, filename = " - << request->filename() - << ", sequencenum = " << request->seq(); + << ", CleanSnapShotFile ok, filename = " + << request->filename() + << ", sequencenum = " << request->seq(); } } return; @@ -144,10 +130,10 @@ class SnapShotCleanTask: public Task { std::shared_ptr asyncEntity_; }; -class CommonFileCleanTask: public Task { +class CommonFileCleanTask : public Task { public: CommonFileCleanTask(TaskIDType taskID, std::shared_ptr core, - FileInfo fileInfo) { + FileInfo fileInfo) { cleanCore_ = core; fileInfo_ = fileInfo; SetTaskProgress(TaskProgress()); @@ -211,4 +197,4 @@ class SegmentCleanTask : public Task { } // namespace mds } // namespace curve -#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ +#endif // SRC_MDS_NAMESERVER2_CLEAN_TASK_H_ diff --git 
a/src/snapshotcloneserver/clone/clone_service_manager.h b/src/snapshotcloneserver/clone/clone_service_manager.h index 0cd66e9d09..6e085df318 100644 --- a/src/snapshotcloneserver/clone/clone_service_manager.h +++ b/src/snapshotcloneserver/clone/clone_service_manager.h @@ -22,18 +22,18 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_SERVICE_MANAGER_H_ +#include #include #include -#include +#include "src/common/concurrent/dlock.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/common/wait_interval.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" #include "src/snapshotcloneserver/clone/clone_core.h" #include "src/snapshotcloneserver/clone/clone_task.h" #include "src/snapshotcloneserver/clone/clone_task_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/config.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" namespace curve { namespace snapshotcloneserver { @@ -44,26 +44,16 @@ class TaskCloneInfo { public: TaskCloneInfo() = default; - TaskCloneInfo(const CloneInfo &cloneInfo, - uint32_t progress) - : cloneInfo_(cloneInfo), - cloneProgress_(progress) {} + TaskCloneInfo(const CloneInfo& cloneInfo, uint32_t progress) + : cloneInfo_(cloneInfo), cloneProgress_(progress) {} - void SetCloneInfo(const CloneInfo &cloneInfo) { - cloneInfo_ = cloneInfo; - } + void SetCloneInfo(const CloneInfo& cloneInfo) { cloneInfo_ = cloneInfo; } - CloneInfo GetCloneInfo() const { - return cloneInfo_; - } + CloneInfo GetCloneInfo() const { return cloneInfo_; } - void SetCloneProgress(uint32_t progress) { - cloneProgress_ = progress; - } + void SetCloneProgress(uint32_t progress) { cloneProgress_ = progress; } - uint32_t GetCloneProgress() const { - return cloneProgress_; - } + uint32_t GetCloneProgress() const { return cloneProgress_; } Json::Value ToJsonObj() const { Json::Value 
cloneTaskObj; @@ -72,88 +62,76 @@ class TaskCloneInfo { cloneTaskObj["User"] = info.GetUser(); cloneTaskObj["File"] = info.GetDest(); cloneTaskObj["Src"] = info.GetSrc(); - cloneTaskObj["TaskType"] = static_cast ( - info.GetTaskType()); - cloneTaskObj["TaskStatus"] = static_cast ( - info.GetStatus()); + cloneTaskObj["TaskType"] = static_cast(info.GetTaskType()); + cloneTaskObj["TaskStatus"] = static_cast(info.GetStatus()); cloneTaskObj["IsLazy"] = info.GetIsLazy(); - cloneTaskObj["NextStep"] = static_cast (info.GetNextStep()); + cloneTaskObj["NextStep"] = static_cast(info.GetNextStep()); cloneTaskObj["Time"] = info.GetTime(); cloneTaskObj["Progress"] = GetCloneProgress(); - cloneTaskObj["FileType"] = static_cast (info.GetFileType()); + cloneTaskObj["FileType"] = static_cast(info.GetFileType()); return cloneTaskObj; } - void LoadFromJsonObj(const Json::Value &jsonObj) { + void LoadFromJsonObj(const Json::Value& jsonObj) { CloneInfo info; info.SetTaskId(jsonObj["UUID"].asString()); info.SetUser(jsonObj["User"].asString()); info.SetDest(jsonObj["File"].asString()); info.SetSrc(jsonObj["Src"].asString()); - info.SetTaskType(static_cast( - jsonObj["TaskType"].asInt())); - info.SetStatus(static_cast( - jsonObj["TaskStatus"].asInt())); + info.SetTaskType( + static_cast(jsonObj["TaskType"].asInt())); + info.SetStatus(static_cast(jsonObj["TaskStatus"].asInt())); info.SetIsLazy(jsonObj["IsLazy"].asBool()); info.SetNextStep(static_cast(jsonObj["NextStep"].asInt())); info.SetTime(jsonObj["Time"].asUInt64()); - info.SetFileType(static_cast( - jsonObj["FileType"].asInt())); + info.SetFileType( + static_cast(jsonObj["FileType"].asInt())); SetCloneInfo(info); } private: - CloneInfo cloneInfo_; - uint32_t cloneProgress_; + CloneInfo cloneInfo_; + uint32_t cloneProgress_; }; class CloneFilterCondition { public: CloneFilterCondition() - : uuid_(nullptr), - source_(nullptr), - destination_(nullptr), - user_(nullptr), - status_(nullptr), - type_(nullptr) {} - - 
CloneFilterCondition(const std::string *uuid, const std::string *source, - const std::string *destination, const std::string *user, - const std::string *status, const std::string *type) - : uuid_(uuid), - source_(source), - destination_(destination), - user_(user), - status_(status), - type_(type) {} - bool IsMatchCondition(const CloneInfo &cloneInfo); - - void SetUuid(const std::string *uuid) { - uuid_ = uuid; - } - void SetSource(const std::string *source) { - source_ = source; - } - void SetDestination(const std::string *destination) { + : uuid_(nullptr), + source_(nullptr), + destination_(nullptr), + user_(nullptr), + status_(nullptr), + type_(nullptr) {} + + CloneFilterCondition(const std::string* uuid, const std::string* source, + const std::string* destination, + const std::string* user, const std::string* status, + const std::string* type) + : uuid_(uuid), + source_(source), + destination_(destination), + user_(user), + status_(status), + type_(type) {} + bool IsMatchCondition(const CloneInfo& cloneInfo); + + void SetUuid(const std::string* uuid) { uuid_ = uuid; } + void SetSource(const std::string* source) { source_ = source; } + void SetDestination(const std::string* destination) { destination_ = destination; } - void SetUser(const std::string *user) { - user_ = user; - } - void SetStatus(const std::string *status) { - status_ = status; - } - void SetType(const std::string *type) { - type_ = type; - } + void SetUser(const std::string* user) { user_ = user; } + void SetStatus(const std::string* status) { status_ = status; } + void SetType(const std::string* type) { type_ = type; } private: - const std::string *uuid_; - const std::string *source_; - const std::string *destination_; - const std::string *user_; - const std::string *status_; - const std::string *type_; + const std::string* uuid_; + const std::string* source_; + const std::string* destination_; + const std::string* user_; + const std::string* status_; + const std::string* type_; }; class 
CloneServiceManagerBackend { public: @@ -161,7 +139,8 @@ class CloneServiceManagerBackend { virtual ~CloneServiceManagerBackend() {} /** - * @brief 后台扫描线程执行函数,扫描克隆卷是否存在 + * @brief Background scan thread execution function to scan for the + * existence of cloned volumes * */ virtual void Func() = 0; @@ -177,12 +156,9 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { public: explicit CloneServiceManagerBackendImpl( std::shared_ptr cloneCore) - : cloneCore_(cloneCore), - isStop_(true) { - } + : cloneCore_(cloneCore), isStop_(true) {} - ~CloneServiceManagerBackendImpl() { - } + ~CloneServiceManagerBackendImpl() {} void Func() override; void Init(uint32_t recordIntevalMs, uint32_t roundIntevalMs) override; @@ -191,13 +167,14 @@ class CloneServiceManagerBackendImpl : public CloneServiceManagerBackend { private: std::shared_ptr cloneCore_; - // 后台扫描线程,扫描clone卷是否存在 + // Background scan thread to check if clone volume exists std::thread backEndReferenceScanThread_; - // 当前后台扫描是否停止,用于支持start,stop功能 + // Is the current background scanning stopped? 
Used to + // support start and stop functions std::atomic_bool isStop_; - // 后台扫描线程记录使用定时器 + // Using a timer for background scanning thread records common::WaitInterval recordWaitInterval_; - // 后台扫描线程每轮使用定时器 + // The backend scanning thread uses a timer for each round common::WaitInterval roundWaitInterval_; }; @@ -207,250 +184,242 @@ class CloneServiceManager { std::shared_ptr cloneTaskMgr, std::shared_ptr cloneCore, std::shared_ptr cloneServiceManagerBackend) - : cloneTaskMgr_(cloneTaskMgr), - cloneCore_(cloneCore), - cloneServiceManagerBackend_(cloneServiceManagerBackend) { + : cloneTaskMgr_(cloneTaskMgr), + cloneCore_(cloneCore), + cloneServiceManagerBackend_(cloneServiceManagerBackend) { destFileLock_ = std::make_shared(); } virtual ~CloneServiceManager() {} /** - * @brief 初始化 + * @brief initialization * - * @return 错误码 + * @return error code */ - virtual int Init(const SnapshotCloneServerOptions &option); + virtual int Init(const SnapshotCloneServerOptions& option); /** - * @brief 启动服务 + * @brief Start Service * - * @return 错误码 + * @return error code */ virtual int Start(); /** - * @brief 停止服务 + * @brief Stop service * */ virtual void Stop(); /** - * @brief 从文件或快照克隆出一个文件 + * @brief Clone a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source: Uuid of file or snapshot + * @param user: The user of the file or snapshot + * @param destination: Destination file + * @param lazyFlag: Is in lazy mode + * @param closure: Asynchronous callback entity + * @param[out] taskId: Task ID * - * @return 错误码 + * @return error code */ - virtual int CloneFile(const UUID &source, - const std::string &user, - const std::string &destination, - const std::string &poolset, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int CloneFile(const UUID& source, const std::string& user, + const 
std::string& destination, + const std::string& poolset, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId); /** - * @brief 从文件或快照恢复一个文件 + * @brief Restore a file from a file or snapshot * - * @param source 文件或快照的uuid - * @param user 文件或快照的用户 - * @param destination 目标文件名 - * @param lazyFlag 是否lazy模式 - * @param closure 异步回调实体 - * @param[out] taskId 任务ID + * @param source: Uuid of file or snapshot + * @param user: The user of the file or snapshot + * @param destination: Destination file name + * @param lazyFlag: Is in lazy mode + * @param closure: Asynchronous callback entity + * @param[out] taskId: Task ID * - * @return 错误码 + * @return error code */ - virtual int RecoverFile(const UUID &source, - const std::string &user, - const std::string &destination, - bool lazyFlag, - std::shared_ptr closure, - TaskIdType *taskId); + virtual int RecoverFile(const UUID& source, const std::string& user, + const std::string& destination, bool lazyFlag, + std::shared_ptr closure, + TaskIdType* taskId); /** - * @brief 安装克隆文件的数据,用于Lazy克隆 + * @brief Install data from clone files for Lazy cloning * - * @param user 用户 - * @param taskId 任务ID + * @param user: user + * @param taskId: Task ID * - * @return 错误码 + * @return error code */ - virtual int Flatten( - const std::string &user, - const TaskIdType &taskId); + virtual int Flatten(const std::string& user, const TaskIdType& taskId); /** - * @brief 查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user * - * @param user 用户名 - * @param info 克隆/恢复任务信息 + * @param user: username + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfo(const std::string &user, - std::vector *info); + virtual int GetCloneTaskInfo(const std::string& user, + std::vector* info); /** - * @brief 通过Id查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user + * through ID * - * @param user 用户名 - * @param taskId 指定的任务Id - * @param 
info 克隆/恢复任务信息 + * @param user: username + * @param taskId: Task Id specified + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoById( - const std::string &user, - const TaskIdType &taskId, - std::vector *info); + virtual int GetCloneTaskInfoById(const std::string& user, + const TaskIdType& taskId, + std::vector* info); /** - * @brief 通过文件名查询某个用户的克隆/恢复任务信息 + * @brief Query the clone/restore task information of a certain user through + * a file name * - * @param user 用户名 - * @param fileName 指定的文件名 - * @param info 克隆/恢复任务信息 + * @param user: username + * @param fileName: The file name specified + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByName( - const std::string &user, - const std::string &fileName, - std::vector *info); + virtual int GetCloneTaskInfoByName(const std::string& user, + const std::string& fileName, + std::vector* info); /** - * @brief 通过过滤条件查询某个用户的克隆/恢复任务信息 + * @brief: Query a user's clone/restore task information through filtering + * criteria * - * @param filter 过滤条件 - * @param info 克隆/恢复任务信息 + * @param filter: filtering conditions + * @param info: Clone/Restore Task Information * - * @return 错误码 + * @return error code */ - virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition &filter, - std::vector *info); + virtual int GetCloneTaskInfoByFilter(const CloneFilterCondition& filter, + std::vector* info); /** - * @brief 查询src是否有依赖 + * @brief: Check if src has dependencies * - * @param src 指定的文件名 - * @param refStatus 0表示没有依赖,1表示有依赖,2表示需要进一步确认 - * @param needCheckFiles 需要进一步确认的文件列表 + * @param src: specified file name + * @param refStatus: 0 indicates no dependencies, 1 indicates dependencies, + * and 2 indicates further confirmation is needed + * @param needCheckFiles: List of files that require further confirmation * - * @return 错误码 + * @return error code */ - virtual int GetCloneRefStatus(const 
std::string &src, - CloneRefStatus *refStatus, - std::vector *needCheckFiles); + virtual int GetCloneRefStatus(const std::string& src, + CloneRefStatus* refStatus, + std::vector* needCheckFiles); /** - * @brief 清除失败的clone/Recover任务、状态、文件 + * @brief Clear failed clone/recover tasks, status, files * - * @param user 用户名 - * @param taskId 任务Id + * @param user: username + * @param taskId: Task Id * - * @return 错误码 + * @return error code */ - virtual int CleanCloneTask(const std::string &user, - const TaskIdType &taskId); + virtual int CleanCloneTask(const std::string& user, + const TaskIdType& taskId); /** - * @brief 重启后恢复未完成clone和recover任务 + * @brief Restore unfinished clone and recover tasks after restarting * - * @return 错误码 + * @return error code */ virtual int RecoverCloneTask(); // for test - void SetDLock(std::shared_ptr dlock) { - dlock_ = dlock; - } + void SetDLock(std::shared_ptr dlock) { dlock_ = dlock; } private: /** - * @brief 从给定的任务列表中获取指定用户的任务集 + * @brief Get the task set of the specified user from the given task list * - * @param cloneInfos 克隆/恢复信息 - * @param user 用户信息 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos: Clone/Restore Information + * @param user: User information + * @param[out] info: Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - const std::string &user, - std::vector *info); + const std::string& user, + std::vector* info); /** - * @brief 从给定的任务列表中获取符合过滤条件的任务集 + * @brief Retrieve task sets that meet the filtering criteria from the given + * task list * - * @param cloneInfos 克隆/恢复信息 - * @param filter 过滤条件 - * @param[out] info 克隆/恢复任务信息 + * @param cloneInfos: Clone/Restore Information + * @param filter: Filtering conditions + * @param[out] info: Clone/restore task information * - * @return 错误码 + * @return error code */ int GetCloneTaskInfoInner(std::vector cloneInfos, - CloneFilterCondition filter, - std::vector *info); + CloneFilterCondition filter, + 
std::vector* info); /** - * @brief 获取已经完成任务信息 + * @brief to obtain completed task information * - * @param taskId 任务ID - * @param taskCloneInfoOut 克隆任务信息 + * @param taskId: Task ID + * @param taskCloneInfoOut: Clone task information * - * @return 错误码 + * @return error code */ - int GetFinishedCloneTask( - const TaskIdType &taskId, - TaskCloneInfo *taskCloneInfoOut); + int GetFinishedCloneTask(const TaskIdType& taskId, + TaskCloneInfo* taskCloneInfoOut); /** - * @brief 根据克隆任务信息恢复克隆任务 + * @brief Restore clone task based on clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo: Clone task information * - * @return 错误码 + * @return error code */ - int RecoverCloneTaskInternal(const CloneInfo &cloneInfo); + int RecoverCloneTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 根据克隆任务信息恢复清除克隆任务 + * @brief Restore and clear clone tasks based on clone task information * - * @param cloneInfo 克隆任务信息 + * @param cloneInfo: Clone task information * - * @return 错误码 + * @return error code */ - int RecoverCleanTaskInternal(const CloneInfo &cloneInfo); + int RecoverCleanTaskInternal(const CloneInfo& cloneInfo); /** - * @brief 构建和push Lazy的任务 + * @brief Task of building and pushing Lazy * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo: Clone task information + * @param closure: Asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); /** - * @brief 构建和push 非Lazy的任务 + * @brief Build and push non Lazy tasks * - * @param cloneInfo 克隆任务信息 - * @param closure 异步回调实体 + * @param cloneInfo: Clone task information + * @param closure: Asynchronous callback entity * - * @return 错误码 + * @return error code */ int BuildAndPushCloneOrRecoverNotLazyTask( - CloneInfo cloneInfo, - std::shared_ptr closure); + CloneInfo cloneInfo, std::shared_ptr closure); private: std::shared_ptr dlockOpts_; @@ -461,8 
+430,6 @@ class CloneServiceManager { std::shared_ptr cloneServiceManagerBackend_; }; - - } // namespace snapshotcloneserver } // namespace curve diff --git a/src/snapshotcloneserver/clone/clone_task.h b/src/snapshotcloneserver/clone/clone_task.h index 8ea5c6be51..2ddc10976e 100644 --- a/src/snapshotcloneserver/clone/clone_task.h +++ b/src/snapshotcloneserver/clone/clone_task.h @@ -23,17 +23,17 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_H_ -#include #include +#include -#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/common/concurrent/dlock.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/snapshotcloneserver/clone/clone_closure.h" +#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/common/curvefs_client.h" +#include "src/snapshotcloneserver/common/snapshotclone_metric.h" #include "src/snapshotcloneserver/common/task.h" #include "src/snapshotcloneserver/common/task_info.h" -#include "src/snapshotcloneserver/common/snapshotclone_metric.h" -#include "src/snapshotcloneserver/common/curvefs_client.h" -#include "src/snapshotcloneserver/clone/clone_closure.h" -#include "src/common/concurrent/dlock.h" using ::curve::common::DLock; @@ -42,33 +42,23 @@ namespace snapshotcloneserver { class CloneTaskInfo : public TaskInfo { public: - CloneTaskInfo(const CloneInfo &cloneInfo, - std::shared_ptr metric, - std::shared_ptr closure) + CloneTaskInfo(const CloneInfo& cloneInfo, + std::shared_ptr metric, + std::shared_ptr closure) : TaskInfo(), cloneInfo_(cloneInfo), metric_(metric), closure_(closure) {} - CloneInfo& GetCloneInfo() { - return cloneInfo_; - } + CloneInfo& GetCloneInfo() { return cloneInfo_; } - const CloneInfo& GetCloneInfo() const { - return cloneInfo_; - } + const CloneInfo& GetCloneInfo() const { return cloneInfo_; } - TaskIdType GetTaskId() const { - return cloneInfo_.GetTaskId(); - } + TaskIdType GetTaskId() const { 
return cloneInfo_.GetTaskId(); } - void UpdateMetric() { - metric_->Update(this); - } + void UpdateMetric() { metric_->Update(this); } - std::shared_ptr GetClosure() { - return closure_; - } + std::shared_ptr GetClosure() { return closure_; } private: CloneInfo cloneInfo_; @@ -76,20 +66,16 @@ class CloneTaskInfo : public TaskInfo { std::shared_ptr closure_; }; -std::ostream& operator<<(std::ostream& os, const CloneTaskInfo &taskInfo); +std::ostream& operator<<(std::ostream& os, const CloneTaskInfo& taskInfo); class CloneTaskBase : public Task { public: - CloneTaskBase(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) - : Task(taskId), - taskInfo_(taskInfo), - core_(core) {} - - std::shared_ptr GetTaskInfo() const { - return taskInfo_; - } + CloneTaskBase(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) + : Task(taskId), taskInfo_(taskInfo), core_(core) {} + + std::shared_ptr GetTaskInfo() const { return taskInfo_; } protected: std::shared_ptr taskInfo_; @@ -98,9 +84,8 @@ class CloneTaskBase : public Task { class CloneTask : public CloneTaskBase { public: - CloneTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneTask(const TaskIdType& taskId, std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} void Run() override { @@ -121,17 +106,14 @@ class CloneTask : public CloneTaskBase { } }; - class CloneCleanTask : public CloneTaskBase { public: - CloneCleanTask(const TaskIdType &taskId, - std::shared_ptr taskInfo, - std::shared_ptr core) + CloneCleanTask(const TaskIdType& taskId, + std::shared_ptr taskInfo, + std::shared_ptr core) : CloneTaskBase(taskId, taskInfo, core) {} - void Run() override { - core_->HandleCleanCloneOrRecoverTask(taskInfo_); - } + void Run() override { core_->HandleCleanCloneOrRecoverTask(taskInfo_); } }; struct SnapCloneCommonClosure : public SnapCloneClosure { @@ -145,9 +127,9 @@ struct SnapCloneCommonClosure : 
public SnapCloneClosure { }; struct CreateCloneChunkContext { - // 数据源 + // Data source std::string location; - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; // seqNum uint64_t sn; @@ -155,16 +137,16 @@ struct CreateCloneChunkContext { uint64_t csn; // chunk size uint64_t chunkSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; - // chunk信息 - struct CloneChunkInfo *cloneChunkInfo; + // Chunk Information + struct CloneChunkInfo* cloneChunkInfo; }; using CreateCloneChunkContextPtr = std::shared_ptr; @@ -173,21 +155,20 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { CreateCloneChunkClosure( std::shared_ptr tracker, CreateCloneChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "CreateCloneChunkClosure return fail" - << ", ret = " << context_->retCode - << ", location = " << context_->location - << ", logicalPoolId = " << context_->cidInfo.lpid_ - << ", copysetId = " << context_->cidInfo.cpid_ - << ", chunkId = " << context_->cidInfo.cid_ - << ", seqNum = " << context_->sn - << ", csn = " << context_->csn - << ", taskid = " << context_->taskid; + << ", ret = " << context_->retCode + << ", location = " << context_->location + << ", logicalPoolId = " << context_->cidInfo.lpid_ + << ", copysetId = " << context_->cidInfo.cpid_ + << ", chunkId = " << context_->cidInfo.cid_ + << ", seqNum = " << context_->sn + << ", csn = " << context_->csn + << ", taskid = " << context_->taskid; } tracker_->PushResultContext(context_); tracker_->HandleResponse(context_->retCode); @@ -197,21 +178,21 @@ struct CreateCloneChunkClosure : public SnapCloneClosure { }; struct 
RecoverChunkContext { - // chunkid 信息 + // Chunkid information ChunkIDInfo cidInfo; - // chunk的分片index + // Chunk's sharding index uint64_t partIndex; - // 总的chunk分片数 + // Total Chunk Fragments uint64_t totalPartNum; - // 分片大小 + // Slice size uint64_t partSize; - // 返回值 + // Return value int retCode; // taskid TaskIdType taskid; - // 异步请求开始时间 + // Asynchronous request start time uint64_t startTime; - // 异步请求重试总时间 + // Total retry time for asynchronous requests uint64_t clientAsyncMethodRetryTimeSec; }; @@ -219,17 +200,15 @@ using RecoverChunkContextPtr = std::shared_ptr; struct RecoverChunkClosure : public SnapCloneClosure { RecoverChunkClosure(std::shared_ptr tracker, - RecoverChunkContextPtr context) - : tracker_(tracker), - context_(context) {} + RecoverChunkContextPtr context) + : tracker_(tracker), context_(context) {} void Run() { std::unique_ptr self_guard(this); context_->retCode = GetRetCode(); if (context_->retCode < 0) { LOG(WARNING) << "RecoverChunkClosure return fail" << ", ret = " << context_->retCode - << ", logicalPoolId = " - << context_->cidInfo.lpid_ + << ", logicalPoolId = " << context_->cidInfo.lpid_ << ", copysetId = " << context_->cidInfo.cpid_ << ", chunkId = " << context_->cidInfo.cid_ << ", partIndex = " << context_->partIndex diff --git a/src/snapshotcloneserver/clone/clone_task_manager.h b/src/snapshotcloneserver/clone/clone_task_manager.h index d9607ccedc..56a030a8da 100644 --- a/src/snapshotcloneserver/clone/clone_task_manager.h +++ b/src/snapshotcloneserver/clone/clone_task_manager.h @@ -23,50 +23,46 @@ #ifndef SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ #define SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ -#include -#include #include -#include #include +#include +#include +#include #include // NOLINT -#include "src/snapshotcloneserver/clone/clone_task.h" -#include "src/snapshotcloneserver/common/thread_pool.h" #include "src/common/concurrent/rw_lock.h" #include "src/common/snapshotclone/snapshotclone_define.h" 
+#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/clone/clone_task.h" #include "src/snapshotcloneserver/common/config.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" -#include "src/snapshotcloneserver/clone/clone_core.h" +#include "src/snapshotcloneserver/common/thread_pool.h" -using ::curve::common::RWLock; +using ::curve::common::LockGuard; +using ::curve::common::Mutex; using ::curve::common::ReadLockGuard; +using ::curve::common::RWLock; using ::curve::common::WriteLockGuard; -using ::curve::common::Mutex; -using ::curve::common::LockGuard; namespace curve { namespace snapshotcloneserver { class CloneTaskManager { public: - explicit CloneTaskManager( - std::shared_ptr core, - std::shared_ptr cloneMetric) + explicit CloneTaskManager(std::shared_ptr core, + std::shared_ptr cloneMetric) : isStop_(true), core_(core), cloneMetric_(cloneMetric), cloneTaskManagerScanIntervalMs_(0) {} - ~CloneTaskManager() { - Stop(); - } + ~CloneTaskManager() { Stop(); } int Init(std::shared_ptr stage1Pool, - std::shared_ptr stage2Pool, - std::shared_ptr commonPool, - const SnapshotCloneServerOptions &option) { - cloneTaskManagerScanIntervalMs_ = - option.cloneTaskManagerScanIntervalMs; + std::shared_ptr stage2Pool, + std::shared_ptr commonPool, + const SnapshotCloneServerOptions& option) { + cloneTaskManagerScanIntervalMs_ = option.cloneTaskManagerScanIntervalMs; stage1Pool_ = stage1Pool; stage2Pool_ = stage2Pool; commonPool_ = commonPool; @@ -78,40 +74,39 @@ class CloneTaskManager { void Stop(); /** - * @brief 往任务管理器中加入任务 + * @brief Add a task to the task manager * - * 用于非Lazy克隆及其他删除克隆等管控面的请求 + * Request for non Lazy clones and other deletion of control surfaces such + * as clones * - * @param task 任务 + * @param task: task * - * @return 错误码 + * @return error code */ - int PushCommonTask( - std::shared_ptr task); + int PushCommonTask(std::shared_ptr task); /** - * @brief 往任务管理器中加入LazyClone阶段一的的任务 + * @brief Add LazyClone 
Phase 1 tasks to the task manager * - * @param task 任务 + * @param task: task * - * @return 错误码 + * @return error code */ - int PushStage1Task( - std::shared_ptr task); + int PushStage1Task(std::shared_ptr task); /** - * @brief 往任务管理器中加入LazyClone阶段二的的任务 + * @brief: Add LazyClone Phase 2 tasks to the task manager * - * 目前只用于重启恢复时,将Lazy克隆恢复克隆数据阶段的任务加入任务管理器 + * At present, it is only used for adding tasks from the Lazy clone recovery + * clone data stage to the task manager during restart recovery * - * @param task 任务 + * @param task: task * - * @return 错误码 + * @return error code */ - int PushStage2Task( - std::shared_ptr task); + int PushStage2Task(std::shared_ptr task); - std::shared_ptr GetTask(const TaskIdType &taskId) const; + std::shared_ptr GetTask(const TaskIdType& taskId) const; private: void BackEndThreadFunc(); @@ -120,51 +115,52 @@ class CloneTaskManager { void ScanStage2Tasks(); /** - * @brief 往对应线程池和map中push任务 + * @brief pushes tasks to the corresponding thread pool and map * - * @param task 任务 - * @param taskMap 任务表 - * @param taskMapMutex 任务表和线程池的锁 - * @param taskPool 线程池 + * @param task: Task + * @param taskMap: Task table + * @param taskMapMutex: Task table and thread pool locks + * @param taskPool: Thread Pool * - * @return 错误码 + * @return error code */ int PushTaskInternal( std::shared_ptr task, - std::map > *taskMap, - Mutex *taskMapMutex, - std::shared_ptr taskPool); + std::map >* taskMap, + Mutex* taskMapMutex, std::shared_ptr taskPool); private: - // 后端线程 + // Backend Thread std::thread backEndThread; - // id->克隆任务表 + // ID -> Clone Task Table std::map > cloneTaskMap_; mutable RWLock cloneTaskMapLock_; - // 存放stage1Pool_池的当前任务,key为destination + // Storing stage1Pool_ The current task of the pool, with key as destination std::map > stage1TaskMap_; mutable Mutex stage1TasksLock_; - // 存放stage1Poo2_池的当前任务,key为destination + // Storage stage1Poo2_ The current task of the pool, with key as destination std::map > stage2TaskMap_; mutable Mutex 
stage2TasksLock_; - // 存放commonPool_池的当前任务 + // Store commonPool_ Current task of the pool std::map > commonTaskMap_; mutable Mutex commonTasksLock_; - // 用于Lazy克隆元数据部分的线程池 + // Thread pool for Lazy clone metadata section std::shared_ptr stage1Pool_; - // 用于Lazy克隆数据部分的线程池 + // Thread pool for Lazy clone data section std::shared_ptr stage2Pool_; - // 用于非Lazy克隆和删除克隆等其他管控面的请求的线程池 + // Thread pool for requests for non Lazy clones and deletion of clones and + // other control surfaces std::shared_ptr commonPool_; - // 当前任务管理是否停止,用于支持start,stop功能 + // Is the current task management stopped? Used to support start and stop + // functions std::atomic_bool isStop_; // clone core @@ -173,16 +169,11 @@ class CloneTaskManager { // metric std::shared_ptr cloneMetric_; - // CloneTaskManager 后台线程扫描间隔 + // CloneTaskManager backend thread scan interval uint32_t cloneTaskManagerScanIntervalMs_; }; } // namespace snapshotcloneserver } // namespace curve - - - - - #endif // SRC_SNAPSHOTCLONESERVER_CLONE_CLONE_TASK_MANAGER_H_ diff --git a/src/snapshotcloneserver/common/task_info.h b/src/snapshotcloneserver/common/task_info.h index cc72201d52..e2835c5d59 100644 --- a/src/snapshotcloneserver/common/task_info.h +++ b/src/snapshotcloneserver/common/task_info.h @@ -23,11 +23,10 @@ #ifndef SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ #define SRC_SNAPSHOTCLONESERVER_COMMON_TASK_INFO_H_ - -#include -#include -#include //NOLINT #include +#include +#include //NOLINT +#include #include "src/common/concurrent/concurrent.h" @@ -36,10 +35,7 @@ namespace snapshotcloneserver { class TaskInfo { public: - TaskInfo() - : progress_(0), - isFinish_(false), - isCanceled_(false) {} + TaskInfo() : progress_(0), isFinish_(false), isCanceled_(false) {} virtual ~TaskInfo() {} TaskInfo(const TaskInfo&) = delete; @@ -48,59 +44,47 @@ class TaskInfo { TaskInfo& operator=(TaskInfo&&) = default; /** - * @brief 设置任务完成度百分比 + * @brief Set task completion percentage * - * @param persent 任务完成度百分比 + * @param persent: task 
completion percentage */ - void SetProgress(uint32_t persent) { - progress_ = persent; - } + void SetProgress(uint32_t persent) { progress_ = persent; } /** - * @brief 获取任务完成度百分比 + * @brief Get task completion percentage * - * @return 任务完成度百分比 + * @return Task completion percentage */ - uint32_t GetProgress() const { - return progress_; - } + uint32_t GetProgress() const { return progress_; } /** - * @brief 完成任务 + * @brief Complete the task */ - void Finish() { - isFinish_.store(true); - } + void Finish() { isFinish_.store(true); } /** - * @brief 获取任务是否完成 + * @brief: Is the task completed * - * @retval true 任务完成 - * @retval false 任务未完成 + * @retval true: Task completed + * @retval false: Task not completed */ - bool IsFinish() const { - return isFinish_.load(); - } + bool IsFinish() const { return isFinish_.load(); } /** - * @brief 取消任务 + * @brief Cancel Task */ - void Cancel() { - isCanceled_ = true; - } + void Cancel() { isCanceled_ = true; } /** - * @brief 获取任务是否取消 + * @brief: Check if the task is canceled * - * @retval true 任务已取消 - * @retval false 任务未取消 + * @retval true: The task has been canceled + * @retval false: The task was not canceled */ - bool IsCanceled() const { - return isCanceled_; - } + bool IsCanceled() const { return isCanceled_; } /** - * @brief 重置任务 + * @brief reset task */ void Reset() { isFinish_.store(false); @@ -108,26 +92,24 @@ class TaskInfo { } /** - * @brief 获取任务锁的引用,以便使用LockGuard加锁解锁 + * @brief: Obtain a reference to the task lock for unlocking using LockGuard * - * 用于同步任务完成和取消功能 - * 1. 任务完成前,先锁定任务,然后判断任务是否取消, - * 若已取消,则释放锁, - * 否则执行任务完成逻辑之后释放锁。 - * 2. 任务取消前,先锁定任务,然后判断任务是否完成, - * 若已完成,则释放锁, - * 否则执行任务取消逻辑之后释放锁。 + * Used to synchronize task completion and cancellation functions + * 1. Before completing the task, first lock the task and then determine + * whether the task is cancelled, If cancelled, release the lock, + * Otherwise, release the lock after completing the logic of the task. + * 2. 
Before canceling a task, first lock the task and then determine + * whether the task is completed, If completed, release the lock, + * Otherwise, execute the task to cancel the logic and release the lock. */ - curve::common::Mutex& GetLockRef() { - return lock_; - } + curve::common::Mutex& GetLockRef() { return lock_; } private: - // 任务完成度百分比 + // Task completion percentage uint32_t progress_; - // 任务任务是否结束 + // Is the task completed std::atomic_bool isFinish_; - // 任务是否被取消 + // Has the task been canceled bool isCanceled_; mutable curve::common::Mutex lock_; }; diff --git a/src/snapshotcloneserver/common/thread_pool.h b/src/snapshotcloneserver/common/thread_pool.h index 1f7b4ea697..b9f553b671 100644 --- a/src/snapshotcloneserver/common/thread_pool.h +++ b/src/snapshotcloneserver/common/thread_pool.h @@ -24,6 +24,7 @@ #define SRC_SNAPSHOTCLONESERVER_COMMON_THREAD_POOL_H_ #include + #include "src/common/concurrent/task_thread_pool.h" #include "src/snapshotcloneserver/common/task.h" @@ -31,52 +32,49 @@ namespace curve { namespace snapshotcloneserver { /** - * @brief 快照线程池 + * @brief Snapshot thread pool */ class ThreadPool { public: - /** - * @brief 构造函数 - * - * @param threadNum 最大线程数 - */ - explicit ThreadPool(int threadNum) - : threadNum_(threadNum) {} /** - * @brief 启动线程池 + * @brief Constructor + * + * @param threadNum: Maximum number of threads + */ + explicit ThreadPool(int threadNum) : threadNum_(threadNum) {} + /** + * @brief Start Thread Pool */ int Start(); /** - * @brief 停止线程池 + * @brief Stop thread pool */ void Stop(); /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task: Snapshot task */ void PushTask(std::shared_ptr task) { threadPool_.Enqueue(task->clousre()); } /** - * @brief 添加快照任务 + * @brief Add snapshot task * - * @param task 快照任务 + * @param task: Snapshot task */ - void PushTask(Task* task) { - threadPool_.Enqueue(task->clousre()); - } + void PushTask(Task* task) { threadPool_.Enqueue(task->clousre()); } private: 
/** - * @brief 通用线程池 + * @brief Universal Thread Pool */ curve::common::TaskThreadPool<> threadPool_; /** - * @brief 线程数 + * @brief Number of threads */ int threadNum_; }; diff --git a/src/tools/common.cpp b/src/tools/common.cpp index 35f29bf738..cdcdc369ba 100644 --- a/src/tools/common.cpp +++ b/src/tools/common.cpp @@ -29,11 +29,11 @@ namespace curve { namespace tool { void TrimMetricString(std::string* str) { - // 去掉头部空格 + // Remove header spaces str->erase(0, str->find_first_not_of(" ")); - // 去掉尾部回车 + // Remove the rear carriage return str->erase(str->find_last_not_of("\r\n") + 1); - // 去掉两边双引号 + // Remove double quotes from both sides str->erase(0, str->find_first_not_of("\"")); str->erase(str->find_last_not_of("\"") + 1); } diff --git a/src/tools/common.h b/src/tools/common.h index 1465a76ac7..132eec8360 100644 --- a/src/tools/common.h +++ b/src/tools/common.h @@ -24,8 +24,9 @@ #define SRC_TOOLS_COMMON_H_ #include -#include + #include +#include DECLARE_uint32(logicalPoolId); DECLARE_uint32(copysetId); @@ -34,9 +35,9 @@ namespace curve { namespace tool { /** - * @brief 格式化,从metric获取的string - * 去掉string两边的双引号以及空格和回车 - * @param[out] str 要格式化的string + * @brief formatting, string obtained from metric + * Remove double quotes, spaces, and carriage returns around the string + * @param[out] str: The string to format */ void TrimMetricString(std::string* str); diff --git a/src/tools/status_tool.cpp b/src/tools/status_tool.cpp index 4444f51fd2..ea855bf094 100644 --- a/src/tools/status_tool.cpp +++ b/src/tools/status_tool.cpp @@ -20,17 +20,22 @@ * Author: charisu */ #include "src/tools/status_tool.h" + #include DEFINE_bool(offline, false, "if true, only list offline chunskervers"); -DEFINE_bool(unhealthy, false, "if true, only list chunkserver that unhealthy " - "ratio greater than 0"); -DEFINE_bool(checkHealth, true, "if true, it will check the health " - "state of chunkserver in chunkserver-list"); -DEFINE_bool(checkCSAlive, false, "if true, it will check the online 
state of " - "chunkservers with rpc in chunkserver-list"); -DEFINE_bool(listClientInRepo, true, "if true, list-client will list all clients" - " include that in repo"); +DEFINE_bool(unhealthy, false, + "if true, only list chunkserver that unhealthy " + "ratio greater than 0"); +DEFINE_bool(checkHealth, true, + "if true, it will check the health " + "state of chunkserver in chunkserver-list"); +DEFINE_bool(checkCSAlive, false, + "if true, it will check the online state of " + "chunkservers with rpc in chunkserver-list"); +DEFINE_bool(listClientInRepo, true, + "if true, list-client will list all clients" + " include that in repo"); DEFINE_uint64(walSegmentSize, 8388608, "wal segment size"); DECLARE_string(mdsAddr); DECLARE_string(etcdAddr); @@ -42,8 +47,7 @@ const char* kProtocalCurve = "curve"; namespace curve { namespace tool { -std::ostream& operator<<(std::ostream& os, - std::vector strs) { +std::ostream& operator<<(std::ostream& os, std::vector strs) { for (uint32_t i = 0; i < strs.size(); ++i) { if (i != 0) { os << ", "; @@ -54,11 +58,10 @@ std::ostream& operator<<(std::ostream& os, } std::string ToString(ServiceName name) { - static std::map serviceNameMap = - {{ServiceName::kMds, "mds"}, - {ServiceName::kEtcd, "etcd"}, - {ServiceName::kSnapshotCloneServer, - "snapshot-clone-server"}}; + static std::map serviceNameMap = { + {ServiceName::kMds, "mds"}, + {ServiceName::kEtcd, "etcd"}, + {ServiceName::kSnapshotCloneServer, "snapshot-clone-server"}}; return serviceNameMap[name]; } @@ -83,7 +86,7 @@ int StatusTool::Init(const std::string& command) { } if (CommandNeedSnapshotClone(command)) { int snapshotRet = snapshotClient_->Init(FLAGS_snapshotCloneAddr, - FLAGS_snapshotCloneDummyPort); + FLAGS_snapshotCloneDummyPort); switch (snapshotRet) { case 0: // success @@ -166,7 +169,7 @@ int StatusTool::SpaceCmd() { double physicalUsedRatio = 0; if (spaceInfo.totalChunkSize != 0) { physicalUsedRatio = static_cast(spaceInfo.usedChunkSize) / - spaceInfo.totalChunkSize; + 
spaceInfo.totalChunkSize; } double logicalUsedRatio = 0; @@ -175,28 +178,28 @@ int StatusTool::SpaceCmd() { double createdFileRatio = 0; if (spaceInfo.totalCapacity != 0) { logicalUsedRatio = static_cast(spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; - logicalLeftRatio = static_cast( - spaceInfo.totalCapacity - spaceInfo.allocatedSize) / - spaceInfo.totalCapacity; + spaceInfo.totalCapacity; + logicalLeftRatio = static_cast(spaceInfo.totalCapacity - + spaceInfo.allocatedSize) / + spaceInfo.totalCapacity; createdFileRatio = static_cast(spaceInfo.currentFileSize) / - spaceInfo.totalCapacity; + spaceInfo.totalCapacity; } if (spaceInfo.allocatedSize != 0) { canBeRecycledRatio = static_cast(spaceInfo.recycleAllocSize) / - spaceInfo.allocatedSize; + spaceInfo.allocatedSize; } - std:: cout.setf(std::ios::fixed); + std::cout.setf(std::ios::fixed); std::cout << std::setprecision(2); std::cout << "Space info:" << std::endl; - std::cout << "physical: total = " - << spaceInfo.totalChunkSize / mds::kGB << "GB" - << ", used = " << spaceInfo.usedChunkSize / mds::kGB - << "GB(" << physicalUsedRatio * 100 << "%), left = " + std::cout << "physical: total = " << spaceInfo.totalChunkSize / mds::kGB + << "GB" + << ", used = " << spaceInfo.usedChunkSize / mds::kGB << "GB(" + << physicalUsedRatio * 100 << "%), left = " << (spaceInfo.totalChunkSize - spaceInfo.usedChunkSize) / mds::kGB << "GB(" << (1 - physicalUsedRatio) * 100 << "%)" << std::endl; - std::cout << "logical: total = " - << spaceInfo.totalCapacity / mds::kGB << "GB" + std::cout << "logical: total = " << spaceInfo.totalCapacity / mds::kGB + << "GB" << ", used = " << spaceInfo.allocatedSize / mds::kGB << "GB" << "(" << logicalUsedRatio * 100 << "%, can be recycled = " << spaceInfo.recycleAllocSize / mds::kGB << "GB(" @@ -205,18 +208,19 @@ int StatusTool::SpaceCmd() { << (spaceInfo.totalCapacity - spaceInfo.allocatedSize) / mds::kGB << "GB(" << logicalLeftRatio * 100 << "%)" << ", created file size = " - << 
spaceInfo.currentFileSize / mds::kGB - << "GB(" << createdFileRatio * 100 << "%)" << std::endl; + << spaceInfo.currentFileSize / mds::kGB << "GB(" + << createdFileRatio * 100 << "%)" << std::endl; std::cout << "Every Logicalpool Space info:" << std::endl; - for (const auto &i : spaceInfo.lpoolspaceinfo) { - std::cout << "logicalPool: name = "<< i.second.poolName - << ", poolid = " << i.first - << ", total = "<< i.second.totalCapacity / mds::kGB << "GB" - << ", used = " << i.second.allocatedSize / mds::kGB << "GB" - << ", left = " << (i.second.totalCapacity - - i.second.allocatedSize) / mds::kGB - << "GB"<< std::endl; + for (const auto& i : spaceInfo.lpoolspaceinfo) { + std::cout << "logicalPool: name = " << i.second.poolName + << ", poolid = " << i.first + << ", total = " << i.second.totalCapacity / mds::kGB << "GB" + << ", used = " << i.second.allocatedSize / mds::kGB << "GB" + << ", left = " + << (i.second.totalCapacity - i.second.allocatedSize) / + mds::kGB + << "GB" << std::endl; } return 0; } @@ -264,9 +268,9 @@ int StatusTool::ChunkServerListCmd() { double unhealthyRatio = 0.0; if (FLAGS_checkCSAlive) { - // 发RPC重置online状态 - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + // Send RPC to reset online status + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); bool isOnline = copysetCheckCore_->CheckChunkServerOnline(csAddr); if (isOnline) { chunkserver.set_onlinestate(OnlineState::ONLINE); @@ -290,7 +294,7 @@ int StatusTool::ChunkServerListCmd() { if (FLAGS_checkHealth) { copysetCheckCore_->CheckCopysetsOnChunkServer(csId); const auto& statistics = - copysetCheckCore_->GetCopysetStatistics(); + copysetCheckCore_->GetCopysetStatistics(); unhealthyRatio = statistics.unhealthyRatio; if (FLAGS_unhealthy && unhealthyRatio == 0) { continue; @@ -309,8 +313,7 @@ int StatusTool::ChunkServerListCmd() { std::cout << "chunkServerID = " << csId << ", diskType = " << chunkserver.disktype() << ", 
hostIP = " << chunkserver.hostip() - << ", port = " << chunkserver.port() - << ", rwStatus = " + << ", port = " << chunkserver.port() << ", rwStatus = " << ChunkServerStatus_Name(chunkserver.status()) << ", diskState = " << DiskState_Name(chunkserver.diskstatus()) @@ -318,13 +321,13 @@ int StatusTool::ChunkServerListCmd() { << OnlineState_Name(chunkserver.onlinestate()) << ", copysetNum = " << copysets.size() << ", mountPoint = " << chunkserver.mountpoint() - << ", diskCapacity = " << chunkserver.diskcapacity() - / curve::mds::kGB << " GB" - << ", diskUsed = " << chunkserver.diskused() - / curve::mds::kGB << " GB"; + << ", diskCapacity = " + << chunkserver.diskcapacity() / curve::mds::kGB << " GB" + << ", diskUsed = " << chunkserver.diskused() / curve::mds::kGB + << " GB"; if (FLAGS_checkHealth) { - std::cout << ", unhealthyCopysetRatio = " - << unhealthyRatio * 100 << "%"; + std::cout << ", unhealthyCopysetRatio = " << unhealthyRatio * 100 + << "%"; } if (chunkserver.has_externalip()) { std::cout << ", externalIP = " << chunkserver.externalip(); @@ -333,7 +336,7 @@ int StatusTool::ChunkServerListCmd() { } std::cout << "total: " << total << ", online: " << online; if (!FLAGS_checkCSAlive) { - std::cout <<", unstable: " << unstable; + std::cout << ", unstable: " << unstable; } std::cout << ", offline: " << offline << std::endl; @@ -378,8 +381,8 @@ int StatusTool::LogicalPoolListCmd() { uint64_t total = 0; uint64_t allocSize; AllocMap allocMap; - res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, - &allocSize, &allocMap); + res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &allocSize, + &allocMap); if (res != 0) { std::cout << "GetAllocatedSize of recycle bin fail!" 
<< std::endl; return -1; @@ -417,15 +420,17 @@ int StatusTool::LogicalPoolListCmd() { << curve::mds::topology::LogicalPoolType_Name(lgPool.type()) << ", scanEnable = " << lgPool.scanenable() << ", allocateStatus = " - << curve::mds::topology:: - AllocateStatus_Name(lgPool.allocatestatus()) + << curve::mds::topology::AllocateStatus_Name( + lgPool.allocatestatus()) << ", total space = " << totalSize / curve::mds::kGB << "GB" << ", used space = " << usedSize / curve::mds::kGB << "GB" - << "(" << usedRatio * 100 << "%, can be recycled = " - << canBeRecycle / curve::mds::kGB << "GB" - << "(" << recycleRatio * 100 << "%))" << ", left space = " - << (totalSize - usedSize) / curve::mds::kGB - << "GB(" << (1 - usedRatio) * 100 << "%)" << std::endl; + << "(" << usedRatio * 100 + << "%, can be recycled = " << canBeRecycle / curve::mds::kGB + << "GB" + << "(" << recycleRatio * 100 << "%))" + << ", left space = " + << (totalSize - usedSize) / curve::mds::kGB << "GB(" + << (1 - usedRatio) * 100 << "%)" << std::endl; } std::cout << "total: " << total << std::endl; return 0; @@ -469,9 +474,7 @@ int StatusTool::StatusCmd() { } } -int StatusTool::ChunkServerStatusCmd() { - return PrintChunkserverStatus(false); -} +int StatusTool::ChunkServerStatusCmd() { return PrintChunkserverStatus(false); } int StatusTool::PrintClusterStatus() { int ret = 0; @@ -486,8 +489,8 @@ int StatusTool::PrintClusterStatus() { const auto& statistics = copysetCheckCore_->GetCopysetStatistics(); std::cout << "total copysets: " << statistics.totalNum << ", unhealthy copysets: " << statistics.unhealthyNum - << ", unhealthy_ratio: " - << statistics.unhealthyRatio * 100 << "%" << std::endl; + << ", unhealthy_ratio: " << statistics.unhealthyRatio * 100 << "%" + << std::endl; std::vector phyPools; std::vector lgPools; int res = GetPoolsInCluster(&phyPools, &lgPools); @@ -506,24 +509,24 @@ int StatusTool::PrintClusterStatus() { bool StatusTool::IsClusterHeatlhy() { bool ret = true; - // 1、检查copyset健康状态 + // 1. 
Check the health status of copyset int res = copysetCheckCore_->CheckCopysetsInCluster(); if (res != 0) { std::cout << "Copysets are not healthy!" << std::endl; ret = false; } - // 2、检查mds状态 + // 2. Check the mds status if (!CheckServiceHealthy(ServiceName::kMds)) { ret = false; } - // 3、检查etcd在线状态 + // 3. Check the online status of ETCD if (!CheckServiceHealthy(ServiceName::kEtcd)) { ret = false; } - // 4、检查snapshot clone server状态 + // 4. Check the status of the snapshot clone server if (!noSnapshotServer_ && !CheckServiceHealthy(ServiceName::kSnapshotCloneServer)) { ret = false; @@ -542,10 +545,10 @@ bool StatusTool::CheckServiceHealthy(const ServiceName& name) { break; } case ServiceName::kEtcd: { - int res = etcdClient_->GetEtcdClusterStatus(&leaderVec, - &onlineStatus); + int res = + etcdClient_->GetEtcdClusterStatus(&leaderVec, &onlineStatus); if (res != 0) { - std:: cout << "GetEtcdClusterStatus fail!" << std::endl; + std::cout << "GetEtcdClusterStatus fail!" << std::endl; return false; } break; @@ -579,8 +582,8 @@ bool StatusTool::CheckServiceHealthy(const ServiceName& name) { return ret; } -void StatusTool::PrintOnlineStatus(const std::string& name, - const std::map& onlineStatus) { +void StatusTool::PrintOnlineStatus( + const std::string& name, const std::map& onlineStatus) { std::vector online; std::vector offline; for (const auto& item : onlineStatus) { @@ -674,8 +677,8 @@ int StatusTool::PrintSnapshotCloneStatus() { } std::string version; std::vector failedList; - int res = versionTool_->GetAndCheckSnapshotCloneVersion(&version, - &failedList); + int res = + versionTool_->GetAndCheckSnapshotCloneVersion(&version, &failedList); int ret = 0; if (res != 0) { std::cout << "GetAndCheckSnapshotCloneVersion fail" << std::endl; @@ -710,7 +713,7 @@ int StatusTool::PrintClientStatus() { if (!first) { std::cout << ", "; } - std::cout << "version-" << item2.first << ": " + std::cout << "version-" << item2.first << ": " << item2.second.size(); first = false; } @@ 
-746,13 +749,12 @@ int StatusTool::ScanStatusCmd() { return -1; } - std::cout - << "Scan status for copyset(" - << lpid << "," << copysetId << "):" << std::endl - << " scaning=" << copysetInfo.scaning() - << " lastScanSec=" << copysetInfo.lastscansec() - << " lastScanConsistent=" << copysetInfo.lastscanconsistent() - << std::endl; + std::cout << "Scan status for copyset(" << lpid << "," << copysetId + << "):" << std::endl + << " scaning=" << copysetInfo.scaning() + << " lastScanSec=" << copysetInfo.lastscansec() + << " lastScanConsistent=" << copysetInfo.lastscanconsistent() + << std::endl; return 0; } @@ -769,8 +771,8 @@ int StatusTool::ScanStatusCmd() { if (count % 5 == 0) { std::cout << std::endl; } - std::cout << " (" << copysetInfo.logicalpoolid() - << "," << copysetInfo.copysetid() << ")"; + std::cout << " (" << copysetInfo.logicalpoolid() << "," + << copysetInfo.copysetid() << ")"; count++; } @@ -779,47 +781,47 @@ int StatusTool::ScanStatusCmd() { return 0; } -int CheckUseWalPool(const std::map> - &poolChunkservers, - bool *useWalPool, - bool *useChunkFilePoolAsWalPool, - std::shared_ptr metricClient) { +int CheckUseWalPool( + const std::map>& poolChunkservers, + bool* useWalPool, bool* useChunkFilePoolAsWalPool, + std::shared_ptr metricClient) { int ret = 0; if (!poolChunkservers.empty()) { ChunkServerInfo chunkserver = poolChunkservers.begin()->second[0]; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); // check whether use chunkfilepool std::string metricValue; std::string metricName = GetUseWalPoolName(csAddr); - MetricRet res = metricClient->GetConfValueFromMetric(csAddr, - metricName, &metricValue); + MetricRet res = metricClient->GetConfValueFromMetric(csAddr, metricName, + &metricValue); if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool conf " - << csAddr << " fail!" 
<< std::endl; + std::cout << "Get use chunkfilepool conf " << csAddr << " fail!" + << std::endl; ret = -1; } std::string raftLogProtocol = curve::common::UriParser ::GetProtocolFromUri(metricValue); - *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; + *useWalPool = kProtocalCurve == raftLogProtocol ? true : false; - // check whether use chunkfilepool as walpool from chunkserver conf metric // NOLINT + // check whether use chunkfilepool as walpool // NOLINT + // from chunkserver conf metric // NOLINT metricName = GetUseChunkFilePoolAsWalPoolName(csAddr); res = metricClient->GetConfValueFromMetric(csAddr, metricName, - &metricValue); + &metricValue); if (res != MetricRet::kOK) { - std::cout << "Get use chunkfilepool as walpool conf " - << csAddr << " fail!" << std::endl; + std::cout << "Get use chunkfilepool as walpool conf " << csAddr + << " fail!" << std::endl; ret = -1; } - *useChunkFilePoolAsWalPool = StringToBool(metricValue, - useChunkFilePoolAsWalPool); + *useChunkFilePoolAsWalPool = + StringToBool(metricValue, useChunkFilePoolAsWalPool); } return ret; } int PrintChunkserverOnlineStatus( - const std::map> &poolChunkservers, + const std::map>& poolChunkservers, std::shared_ptr copysetCheckCore, std::shared_ptr mdsClient) { int ret = 0; @@ -830,8 +832,8 @@ int PrintChunkserverOnlineStatus( for (const auto& poolChunkserver : poolChunkservers) { for (const auto& chunkserver : poolChunkserver.second) { total++; - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); if (copysetCheckCore->CheckChunkServerOnline(csAddr)) { online++; } else { @@ -844,11 +846,11 @@ int PrintChunkserverOnlineStatus( std::vector offlineRecover; if (offlineCs.size() > 0) { std::map statusMap; - int res = mdsClient->QueryChunkServerRecoverStatus( - offlineCs, &statusMap); + int res = + mdsClient->QueryChunkServerRecoverStatus(offlineCs, &statusMap); if 
(res != 0) { std::cout << "query offlinne chunkserver recover status fail"; - ret = -1; + ret = -1; } else { // Distinguish between recovering and unrecovered for (auto it = statusMap.begin(); it != statusMap.end(); ++it) { @@ -858,14 +860,13 @@ int PrintChunkserverOnlineStatus( } } } - std::cout << "chunkserver: total num = " << total - << ", online = " << online - << ", offline = " << offline - << "(recoveringout = " << offlineRecover.size() - << ", chunkserverlist: ["; + std::cout << "chunkserver: total num = " << total << ", online = " << online + << ", offline = " << offline + << "(recoveringout = " << offlineRecover.size() + << ", chunkserverlist: ["; int i = 0; - for (ChunkServerIdType csId : offlineRecover) { + for (ChunkServerIdType csId : offlineRecover) { i++; if (i == static_cast(offlineRecover.size())) { std::cout << csId; @@ -878,26 +879,25 @@ int PrintChunkserverOnlineStatus( } int GetChunkserverLeftSize( - const std::map> &poolChunkservers, - std::map> *poolChunkLeftSize, - std::map> *poolWalSegmentLeftSize, - bool useWalPool, - bool useChunkFilePoolAsWalPool, + const std::map>& poolChunkservers, + std::map>* poolChunkLeftSize, + std::map>* poolWalSegmentLeftSize, + bool useWalPool, bool useChunkFilePoolAsWalPool, std::shared_ptr metricClient) { int ret = 0; for (const auto& poolChunkserver : poolChunkservers) { std::vector chunkLeftSize; std::vector walSegmentLeftSize; for (const auto& chunkserver : poolChunkserver.second) { - std::string csAddr = chunkserver.hostip() - + ":" + std::to_string(chunkserver.port()); + std::string csAddr = + chunkserver.hostip() + ":" + std::to_string(chunkserver.port()); std::string metricName = GetCSLeftChunkName(csAddr); uint64_t chunkNum; - MetricRet res = metricClient->GetMetricUint(csAddr, - metricName, &chunkNum); + MetricRet res = + metricClient->GetMetricUint(csAddr, metricName, &chunkNum); if (res != MetricRet::kOK) { std::cout << "Get left chunk size of chunkserver " << csAddr - << " fail!" 
<< std::endl; + << " fail!" << std::endl; ret = -1; continue; } @@ -909,10 +909,10 @@ int GetChunkserverLeftSize( metricName = GetCSLeftWalSegmentName(csAddr); uint64_t walSegmentNum; res = metricClient->GetMetricUint(csAddr, metricName, - &walSegmentNum); + &walSegmentNum); if (res != MetricRet::kOK) { std::cout << "Get left wal segment size of chunkserver " - << csAddr << " fail!" << std::endl; + << csAddr << " fail!" << std::endl; ret = -1; continue; } @@ -922,7 +922,7 @@ int GetChunkserverLeftSize( } poolChunkLeftSize->emplace(poolChunkserver.first, chunkLeftSize); poolWalSegmentLeftSize->emplace(poolChunkserver.first, - walSegmentLeftSize); + walSegmentLeftSize); } return ret; } @@ -932,8 +932,8 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { std::cout << "ChunkServer status:" << std::endl; std::string version; std::vector failedList; - int res = versionTool_->GetAndCheckChunkServerVersion(&version, - &failedList); + int res = + versionTool_->GetAndCheckChunkServerVersion(&version, &failedList); int ret = 0; if (res != 0) { std::cout << "GetAndCheckChunkserverVersion fail" << std::endl; @@ -954,8 +954,7 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { } // get chunkserver online status - ret = PrintChunkserverOnlineStatus(poolChunkservers, - copysetCheckCore_, + ret = PrintChunkserverOnlineStatus(poolChunkservers, copysetCheckCore_, mdsClient_); if (!checkLeftSize) { return ret; @@ -970,12 +969,9 @@ int StatusTool::PrintChunkserverStatus(bool checkLeftSize) { // get chunkserver left size std::map> poolChunkLeftSize; std::map> poolWalSegmentLeftSize; - ret = GetChunkserverLeftSize(poolChunkservers, - &poolChunkLeftSize, - &poolWalSegmentLeftSize, - useWalPool, - useChunkFilePoolAsWalPool, - metricClient_); + ret = GetChunkserverLeftSize(poolChunkservers, &poolChunkLeftSize, + &poolWalSegmentLeftSize, useWalPool, + useChunkFilePoolAsWalPool, metricClient_); if (0 != ret) { return ret; } @@ -993,9 +989,9 @@ int 
StatusTool::PrintChunkserverStatus(bool checkLeftSize) { return ret; } -void StatusTool::PrintCsLeftSizeStatistics(const std::string& name, - const std::map>& poolLeftSize) { +void StatusTool::PrintCsLeftSizeStatistics( + const std::string& name, + const std::map>& poolLeftSize) { if (poolLeftSize.empty()) { std::cout << "No " << name << " left size found!" << std::endl; return; @@ -1024,19 +1020,19 @@ void StatusTool::PrintCsLeftSizeStatistics(const std::string& name, } double var = sum / leftSize.second.size(); - std:: cout.setf(std::ios::fixed); - std::cout<< std::setprecision(2); - std::cout<< "pool" << leftSize.first << " " << name; + std::cout.setf(std::ios::fixed); + std::cout << std::setprecision(2); + std::cout << "pool" << leftSize.first << " " << name; std::cout << " left size: min = " << min << "GB" - << ", max = " << max << "GB" - << ", average = " << avg << "GB" - << ", range = " << range << "GB" - << ", variance = " << var << std::endl; + << ", max = " << max << "GB" + << ", average = " << avg << "GB" + << ", range = " << range << "GB" + << ", variance = " << var << std::endl; } } int StatusTool::GetPoolsInCluster(std::vector* phyPools, - std::vector* lgPools) { + std::vector* lgPools) { int res = mdsClient_->ListPhysicalPoolsInCluster(phyPools); if (res != 0) { std::cout << "ListPhysicalPoolsInCluster fail!" << std::endl; @@ -1044,7 +1040,7 @@ int StatusTool::GetPoolsInCluster(std::vector* phyPools, } for (const auto& phyPool : *phyPools) { int res = mdsClient_->ListLogicalPoolsInPhysicalPool( - phyPool.physicalpoolid(), lgPools) != 0; + phyPool.physicalpoolid(), lgPools) != 0; if (res != 0) { std::cout << "ListLogicalPoolsInPhysicalPool fail!" << std::endl; return -1; @@ -1066,9 +1062,9 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { std::cout << "Get root directory file size from mds fail!" 
<< std::endl; return -1; } - // 从metric获取space信息 + // Obtain space information from metric for (const auto& lgPool : lgPools) { - LogicalpoolSpaceInfo lpinfo; + LogicalpoolSpaceInfo lpinfo; std::string poolName = lgPool.logicalpoolname(); lpinfo.poolName = poolName; std::string metricName = GetPoolTotalChunkSizeName(poolName); @@ -1079,7 +1075,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { return -1; } spaceInfo->totalChunkSize += size; - lpinfo.totalChunkSize +=size; + lpinfo.totalChunkSize += size; metricName = GetPoolUsedChunkSizeName(poolName); res = mdsClient_->GetMetric(metricName, &size); if (res != 0) { @@ -1105,10 +1101,10 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { spaceInfo->allocatedSize += size; lpinfo.allocatedSize += size; spaceInfo->lpoolspaceinfo.insert( - std::pair( - lgPool.logicalpoolid(), lpinfo)); + std::pair(lgPool.logicalpoolid(), + lpinfo)); } - // 获取RecycleBin的分配大小 + // Obtain the allocation size of RecycleBin res = mdsClient_->GetAllocatedSize(curve::mds::RECYCLEBINDIR, &spaceInfo->recycleAllocSize); if (res != 0) { @@ -1118,7 +1114,7 @@ int StatusTool::GetSpaceInfo(SpaceInfo* spaceInfo) { return 0; } -int StatusTool::RunCommand(const std::string &cmd) { +int StatusTool::RunCommand(const std::string& cmd) { if (Init(cmd) != 0) { std::cout << "Init StatusTool failed" << std::endl; return -1; diff --git a/src/tools/status_tool.h b/src/tools/status_tool.h index 82b776fa73..ea68db615c 100644 --- a/src/tools/status_tool.h +++ b/src/tools/status_tool.h @@ -23,31 +23,33 @@ #ifndef SRC_TOOLS_STATUS_TOOL_H_ #define SRC_TOOLS_STATUS_TOOL_H_ +#include #include #include -#include -#include + #include -#include -#include -#include +#include #include +#include +#include #include +#include + #include "proto/topology.pb.h" #include "src/common/timeutility.h" +#include "src/common/uri_parser.h" #include "src/mds/common/mds_define.h" -#include "src/tools/mds_client.h" #include "src/tools/chunkserver_client.h" -#include 
"src/tools/namespace_tool_core.h" #include "src/tools/copyset_check_core.h" -#include "src/tools/etcd_client.h" -#include "src/tools/version_tool.h" #include "src/tools/curve_tool.h" #include "src/tools/curve_tool_define.h" +#include "src/tools/etcd_client.h" +#include "src/tools/mds_client.h" #include "src/tools/metric_client.h" #include "src/tools/metric_name.h" +#include "src/tools/namespace_tool_core.h" #include "src/tools/snapshot_clone_client.h" -#include "src/common/uri_parser.h" +#include "src/tools/version_tool.h" using curve::mds::topology::ChunkServerInfo; using curve::mds::topology::ChunkServerStatus; @@ -63,22 +65,22 @@ struct LogicalpoolSpaceInfo { std::string poolName = ""; uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; }; struct SpaceInfo { uint64_t totalChunkSize = 0; uint64_t usedChunkSize = 0; - // 总体能容纳的文件大小 + // The overall file size that can be accommodated uint64_t totalCapacity = 0; - // 分配大小 + // Allocation size uint64_t allocatedSize = 0; - // recycleBin的分配大小 + // Allocation size of recycleBin uint64_t recycleAllocSize = 0; - // 系统中存在的文件大小 + // File size present in the system uint64_t currentFileSize = 0; std::unordered_map lpoolspaceinfo; }; @@ -100,49 +102,54 @@ class StatusTool : public CurveTool { std::shared_ptr versionTool, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), copysetCheckCore_(copysetCheckCore), - etcdClient_(etcdClient), metricClient_(metricClient), - snapshotClient_(snapshotClient), versionTool_(versionTool), - mdsInited_(false), etcdInited_(false), noSnapshotServer_(false) {} + : mdsClient_(mdsClient), + copysetCheckCore_(copysetCheckCore), + etcdClient_(etcdClient), + metricClient_(metricClient), + snapshotClient_(snapshotClient), + versionTool_(versionTool), + mdsInited_(false), + etcdInited_(false), + 
noSnapshotServer_(false) {} ~StatusTool() = default; /** - * @brief 打印help信息 - * @param cmd:执行的命令 - * @return 无 + * @brief Print help information + * @param cmd: Command executed + * @return None */ - void PrintHelp(const std::string &command) override; + void PrintHelp(const std::string& command) override; /** - * @brief 执行命令 - * @param cmd:执行的命令 - * @return 成功返回0,失败返回-1 + * @brief Execute command + * @param cmd: Command executed + * @return returns 0 for success, -1 for failure */ - int RunCommand(const std::string &command) override; + int RunCommand(const std::string& command) override; /** - * @brief 返回是否支持该命令 - * @param command:执行的命令 - * @return true / false + * @brief Returns whether the command is supported + * @param command: The command executed + * @return true/false */ - static bool SupportCommand(const std::string &command); + static bool SupportCommand(const std::string& command); /** - * @brief 判断集群是否健康 + * @brief Determine whether the cluster is healthy */ bool IsClusterHeatlhy(); private: - int Init(const std::string &command); + int Init(const std::string& command); int SpaceCmd(); int StatusCmd(); int ChunkServerListCmd(); int ServerListCmd(); int LogicalPoolListCmd(); int ChunkServerStatusCmd(); - int GetPoolsInCluster(std::vector *phyPools, - std::vector *lgPools); - int GetSpaceInfo(SpaceInfo *spaceInfo); + int GetPoolsInCluster(std::vector* phyPools, + std::vector* lgPools); + int GetSpaceInfo(SpaceInfo* spaceInfo); int PrintClusterStatus(); int PrintMdsStatus(); int PrintEtcdStatus(); @@ -152,67 +159,68 @@ class StatusTool : public CurveTool { int ScanStatusCmd(); int FormatStatusCmd(); void PrintCsLeftSizeStatistics( - const std::string &name, - const std::map> &poolLeftSize); + const std::string& name, + const std::map>& poolLeftSize); int PrintSnapshotCloneStatus(); /** - * @brief 判断命令是否需要和etcd交互 - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine whether the command needs to interact with etcd + * @param command: 
The command to be executed + * @return Returns true if interaction is needed, otherwise returns false */ - bool CommandNeedEtcd(const std::string &command); - + bool CommandNeedEtcd(const std::string& command); /** - * @brief 判断命令是否需要mds - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief Determine if the command requires mds + * @param command: The command executed + * @return Returns true if mds is needed, otherwise returns false */ - bool CommandNeedMds(const std::string &command); + bool CommandNeedMds(const std::string& command); /** - * @brief 判断命令是否需要snapshot clone server - * @param command:执行的命令 - * @return 需要返回true,否则返回false + * @brief: Determine if the command requires a snapshot clone server + * @param command: The command executed + * @return Returns true if snapshot clone server is needed, otherwise + * returns false */ - bool CommandNeedSnapshotClone(const std::string &command); + bool CommandNeedSnapshotClone(const std::string& command); /** - * @brief 打印在线状态 - * @param name : 在线状态对应的名字 - * @param onlineStatus 在线状态的map + * @brief Print online status + * @param name: The name corresponding to the online status + * @param onlineStatus: Map of online status */ - void PrintOnlineStatus(const std::string &name, - const std::map &onlineStatus); + void PrintOnlineStatus(const std::string& name, + const std::map& onlineStatus); /** - * @brief 获取并打印mds version信息 + * @brief Get and print mds version information */ int GetAndPrintMdsVersion(); /** - * @brief 检查服务是否健康 - * @param name 服务名 + * @brief Check if the service is healthy + * @param name: Service Name */ - bool CheckServiceHealthy(const ServiceName &name); + bool CheckServiceHealthy(const ServiceName& name); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // Copyset检查工具,用于检查集群和chunkserver的健康状态 + // Copyset checking tool, used to check the health status of clusters and + // chunkservers std::shared_ptr copysetCheckCore_; - // etcd client,用于调etcd 
API获取状态 + // ETCD client, used to call the ETCD API to obtain status std::shared_ptr etcdClient_; - // 用于获取metric + // Used to obtain metric std::shared_ptr metricClient_; - // 用于获取snapshot clone的状态 + // Used to obtain the status of snapshot clones std::shared_ptr snapshotClient_; - // version client,用于获取version信息 + // Version client, used to obtain version information std::shared_ptr versionTool_; - // mds是否初始化过 + // Has the mds been initialized bool mdsInited_; - // etcd是否初始化过 + // Has ETCD been initialized bool etcdInited_; // Is there a snapshot service or not bool noSnapshotServer_; diff --git a/src/tools/version_tool.cpp b/src/tools/version_tool.cpp index 6e519bab4a..42b1d3e9a5 100644 --- a/src/tools/version_tool.cpp +++ b/src/tools/version_tool.cpp @@ -48,8 +48,8 @@ int VersionTool::GetAndCheckMdsVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckChunkServerVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList) { std::vector chunkServers; int res = mdsClient_->ListChunkServersInCluster(&chunkServers); if (res != 0) { @@ -78,8 +78,8 @@ int VersionTool::GetAndCheckChunkServerVersion(std::string* version, return ret; } -int VersionTool::GetAndCheckSnapshotCloneVersion(std::string* version, - std::vector* failedList) { +int VersionTool::GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList) { const auto& dummyServerMap = snapshotClient_->GetDummyServerMap(); std::vector dummyServers; for (const auto& item : dummyServerMap) { @@ -123,9 +123,8 @@ void VersionTool::FetchClientProcessMap(const std::vector& addrVec, ProcessMapType* processMap) { for (const auto& addr : addrVec) { std::string cmd; - MetricRet res = metricClient_->GetMetric(addr, - kProcessCmdLineMetricName, - &cmd); + MetricRet res = + metricClient_->GetMetric(addr, kProcessCmdLineMetricName, &cmd); if (res != MetricRet::kOK) { continue; } @@ -156,10 
+155,11 @@ void VersionTool::GetVersionMap(const std::vector& addrVec, failedList->clear(); for (const auto& addr : addrVec) { std::string version; - MetricRet res = metricClient_->GetMetric(addr, kCurveVersionMetricName, - &version); + MetricRet res = + metricClient_->GetMetric(addr, kCurveVersionMetricName, &version); if (res != MetricRet::kOK) { - // 0.0.5.2版本之前没有curve_version的metric,因此再判断一下 + // Before version 0.0.5.2, there was no "curve_version" metric, so + // let's double-check. if (res == MetricRet::kNotFound) { version = kOldVersion; } else { diff --git a/src/tools/version_tool.h b/src/tools/version_tool.h index 9231d1e4fc..01cd05a6c8 100644 --- a/src/tools/version_tool.h +++ b/src/tools/version_tool.h @@ -23,13 +23,14 @@ #ifndef SRC_TOOLS_VERSION_TOOL_H_ #define SRC_TOOLS_VERSION_TOOL_H_ -#include #include -#include #include +#include +#include + +#include "src/common/string_util.h" #include "src/tools/mds_client.h" #include "src/tools/metric_client.h" -#include "src/common/string_util.h" #include "src/tools/snapshot_clone_client.h" namespace curve { @@ -49,95 +50,97 @@ class VersionTool { explicit VersionTool(std::shared_ptr mdsClient, std::shared_ptr metricClient, std::shared_ptr snapshotClient) - : mdsClient_(mdsClient), snapshotClient_(snapshotClient), + : mdsClient_(mdsClient), + snapshotClient_(snapshotClient), metricClient_(metricClient) {} virtual ~VersionTool() {} /** - * @brief 获取mds的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of mds and check version consistency + * @param[out] version: Version + * @return returns 0 for success, -1 for failure */ - virtual int GetAndCheckMdsVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckMdsVersion(std::string* version, + std::vector* failedList); /** - * @brief 获取chunkserver的版本并检查版本一致性 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of chunkserver and check version consistency + * @param[out] 
version: Version + * @return returns 0 for success, -1 for failure */ - virtual int - GetAndCheckChunkServerVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckChunkServerVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取snapshot clone server的版本 - * @param[out] version 版本 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the snapshot clone server + * @param[out] version: Version + * @return returns 0 for success, -1 for failure */ - virtual int - GetAndCheckSnapshotCloneVersion(std::string *version, - std::vector *failedList); + virtual int GetAndCheckSnapshotCloneVersion( + std::string* version, std::vector* failedList); /** - * @brief 获取client的版本 - * @param[out] versionMap process->版本->地址的映射表 - * @return 成功返回0,失败返回-1 + * @brief Get the version of the client + * @param[out] versionMap: Process ->Version ->Address mapping table + * @return returns 0 for success, -1 for failure */ - virtual int GetClientVersion(ClientVersionMapType *versionMap); + virtual int GetClientVersion(ClientVersionMapType* versionMap); /** - * @brief 打印每个version对应的地址 - * @param versionMap version到地址列表的map + * @brief Print the address corresponding to each version + * @param versionMap: Version to address list map */ - static void PrintVersionMap(const VersionMapType &versionMap); + static void PrintVersionMap(const VersionMapType& versionMap); /** - * @brief 打印访问失败的地址 - * @param failedList 访问失败的地址列表 + * @brief Print access failed addresses + * @param failedList: Access Failed Address List */ - static void PrintFailedList(const std::vector &failedList); + static void PrintFailedList(const std::vector& failedList); private: /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] versionMap version到地址的map - * @param[out] failedList 查询version失败的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and + * store the version and address correspondence in the map + * @param 
addrVec: Address List + * @param[out] versionMap: Version to address map + * @param[out] failedList: Query address list for version failure */ - void GetVersionMap(const std::vector &addrVec, - VersionMapType *versionMap, - std::vector *failedList); + void GetVersionMap(const std::vector& addrVec, + VersionMapType* versionMap, + std::vector* failedList); /** - * @brief 获取addrVec对应地址的version,并把version和地址对应关系存在map中 - * @param addrVec 地址列表 - * @param[out] processMap 不同的process对应的client的地址列表 + * @brief Obtain the version of the address corresponding to addrVec and + * store the version and address correspondence in the map + * @param addrVec Address List + * @param[out] processMap The address list of clients corresponding to + * different processes */ - void FetchClientProcessMap(const std::vector &addrVec, - ProcessMapType *processMap); + void FetchClientProcessMap(const std::vector& addrVec, + ProcessMapType* processMap); /** - * @brief 从启动server的命令行获取对应的程序的名字 - * 比如nebd的命令行为 + * @brief Get the name of the corresponding program from the command line of + * starting the server For example, the command behavior of nebd * process_cmdline : "/usr/bin/nebd-server * -confPath=/etc/nebd/nebd-server.conf * -log_dir=/data/log/nebd/server * -graceful_quit_on_sigterm=true * -stderrthreshold=3 * " - * 那么我们要解析出的名字是nebd-server - * @param addrVec 地址列表 - * @return 进程的名字 + * So the name we need to resolve is nebd-server + * @param addrVec: Address List + * @return The name of the process */ - std::string GetProcessNameFromCmd(const std::string &cmd); + std::string GetProcessNameFromCmd(const std::string& cmd); private: - // 向mds发送RPC的client + // Client sending RPC to mds std::shared_ptr mdsClient_; - // 用于获取snapshotClone状态 + // Used to obtain snapshotClone status std::shared_ptr snapshotClient_; - // 获取metric的client + // Obtain metric client std::shared_ptr metricClient_; }; diff --git a/test/chunkserver/client.cpp b/test/chunkserver/client.cpp index 7f8c2e6243..dfc3ca4a99 100644 --- 
a/test/chunkserver/client.cpp +++ b/test/chunkserver/client.cpp @@ -20,49 +20,47 @@ * Author: wudemiao */ -#include -#include -#include #include #include +#include +#include +#include -#include "src/chunkserver/copyset_node.h" #include "proto/chunk.pb.h" #include "proto/copyset.pb.h" #include "src/chunkserver/cli.h" +#include "src/chunkserver/copyset_node.h" #include "test/chunkserver/chunkserver_test_util.h" DEFINE_int32(request_size, 10, "Size of each requst"); DEFINE_int32(timeout_ms, 500, "Timeout for each request"); DEFINE_int32(election_timeout_ms, 3000, "election timeout ms"); DEFINE_int32(write_percentage, 100, "Percentage of fetch_add"); -DEFINE_string(confs, - "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", +DEFINE_string(confs, "127.0.0.1:18200:0,127.0.0.1:18201:0,127.0.0.1:18202:0", "Configuration of the raft group"); -using curve::chunkserver::CopysetRequest; -using curve::chunkserver::CopysetResponse; -using curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::CHUNK_OP_STATUS; +using curve::chunkserver::CHUNK_OP_TYPE; using curve::chunkserver::ChunkRequest; using curve::chunkserver::ChunkResponse; using curve::chunkserver::ChunkService_Stub; -using curve::chunkserver::PeerId; -using curve::chunkserver::LogicPoolID; -using curve::chunkserver::CopysetID; using curve::chunkserver::Configuration; -using curve::chunkserver::CHUNK_OP_TYPE; -using curve::chunkserver::CHUNK_OP_STATUS; using curve::chunkserver::COPYSET_OP_STATUS; +using curve::chunkserver::CopysetID; +using curve::chunkserver::CopysetRequest; +using curve::chunkserver::CopysetResponse; +using curve::chunkserver::CopysetService_Stub; +using curve::chunkserver::LogicPoolID; +using curve::chunkserver::PeerId; -int main(int argc, char *argv[]) { +int main(int argc, char* argv[]) { gflags::ParseCommandLineFlags(&argc, &argv, true); - LogicPoolID logicPoolId = 1; - CopysetID copysetId = 100001; - uint64_t chunkId = 1; - uint64_t sn = 1; - char fillCh = 'a'; + CopysetID 
copysetId = 100001; + uint64_t chunkId = 1; + uint64_t sn = 1; + char fillCh = 'a'; PeerId leader; curve::chunkserver::Configuration conf; @@ -70,9 +68,7 @@ int main(int argc, char *argv[]) { LOG(FATAL) << "conf parse failed: " << FLAGS_confs; } - - - // 创建 copyset + // Create copyset { std::vector peers; conf.list_peers(&peers); @@ -105,8 +101,10 @@ int main(int argc, char *argv[]) { if (cntl.Failed()) { LOG(FATAL) << "create copyset fialed: " << cntl.ErrorText(); } - if (response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS //NOLINT - || response.status() == COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { //NOLINT + if (response.status() == // NOLINT + COPYSET_OP_STATUS::COPYSET_OP_STATUS_SUCCESS // NOLINT + || response.status() == // NOLINT + COPYSET_OP_STATUS::COPYSET_OP_STATUS_EXIST) { // NOLINT LOG(INFO) << "create copyset success: " << response.status(); } else { LOG(FATAL) << "create copyset failed: "; @@ -116,11 +114,9 @@ int main(int argc, char *argv[]) { // wait leader ::usleep(1000 * FLAGS_election_timeout_ms); - butil::Status status = curve::chunkserver::WaitLeader(logicPoolId, - copysetId, - conf, - &leader, - FLAGS_election_timeout_ms); //NOLINT + butil::Status status = + curve::chunkserver::WaitLeader(logicPoolId, copysetId, conf, &leader, + FLAGS_election_timeout_ms); // NOLINT LOG(INFO) << "leader is: " << leader.to_string(); if (0 != status.error_code()) { LOG(FATAL) << "Wait leader failed"; @@ -176,8 +172,5 @@ int main(int argc, char *argv[]) { } } - return 0; } - - diff --git a/test/chunkserver/trash_test.cpp b/test/chunkserver/trash_test.cpp index 3ddf32f27e..2c28a6015c 100644 --- a/test/chunkserver/trash_test.cpp +++ b/test/chunkserver/trash_test.cpp @@ -512,7 +512,7 @@ TEST_F(TrashTest, recycle_wal_failed) { "curve_log_inprogress_10088")) .WillOnce(Return(-1)); - // 失败的情况下不应删除 + // Should not be deleted in case of failure EXPECT_CALL(*lfs, Delete("./runlog/trash_test0/trash/4294967493.55555")) .Times(0); @@ -556,8 +556,7 @@ 
TEST_F(TrashTest, recycle_copyset_dir_list_err) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(-1)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(-1)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -569,8 +568,7 @@ TEST_F(TrashTest, recycle_copyset_dir_ok) { .WillOnce(Return(false)); EXPECT_CALL(*lfs, Mkdir(trashPath)).WillOnce(Return(0)); EXPECT_CALL(*lfs, Rename(dirPath, _, 0)).WillOnce(Return(0)); - EXPECT_CALL(*lfs, List(_, _)) - .WillOnce(Return(0)); + EXPECT_CALL(*lfs, List(_, _)).WillOnce(Return(0)); ASSERT_EQ(0, trash->RecycleCopySet(dirPath)); } @@ -607,18 +605,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // chunk_200_snap_1, abc +1 // log/ - using item4list = struct{ + using item4list = struct { std::string subdir; std::vector& names; }; std::vector action4List{ - { "", copysets }, - { "/4294967493.55555", dirs}, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, - { "/4294967494.55555", dirs}, - { "/4294967494.55555/data", chunks2 }, - { "/4294967494.55555/log", logfiles2 }, + {"", copysets}, + {"/4294967493.55555", dirs}, + {"/4294967493.55555/data", chunks1}, + {"/4294967493.55555/log", logfiles1}, + {"/4294967494.55555", dirs}, + {"/4294967494.55555/data", chunks2}, + {"/4294967494.55555/log", logfiles2}, }; for (auto& it : action4List) { @@ -627,18 +625,18 @@ TEST_F(TrashTest, test_chunk_num_statistic) { } EXPECT_CALL(*lfs, DirExists(_)) - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // chunk_100 - .WillOnce(Return(false)) // chunk_101 - .WillOnce(Return(true)) // log - .WillOnce(Return(false)) // curve_log_10086_10087 - .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 - .WillOnce(Return(false)) // log_10083_10084 - .WillOnce(Return(false)) // log_inprogress_10085 - .WillOnce(Return(true)) // data - .WillOnce(Return(false)) // 
chunk_200_snap_1 - .WillOnce(Return(false)) // abc - .WillOnce(Return(true)); // log + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_100 + .WillOnce(Return(false)) // chunk_101 + .WillOnce(Return(true)) // log + .WillOnce(Return(false)) // curve_log_10086_10087 + .WillOnce(Return(false)) // curve_log_inprogress_10088_10088 + .WillOnce(Return(false)) // log_10083_10084 + .WillOnce(Return(false)) // log_inprogress_10085 + .WillOnce(Return(true)) // data + .WillOnce(Return(false)) // chunk_200_snap_1 + .WillOnce(Return(false)) // abc + .WillOnce(Return(true)); // log trash->Init(ops); ASSERT_EQ(5, trash->GetChunkNum()); @@ -657,14 +655,14 @@ TEST_F(TrashTest, test_chunk_num_statistic) { EXPECT_CALL(*lfs, DirExists(_)) .WillOnce(Return(true)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // data .WillOnce(Return(false)) .WillOnce(Return(false)) - .WillOnce(Return(true)) // log + .WillOnce(Return(true)) // log .WillOnce(Return(false)) - .WillOnce(Return(true)) // raft_snapshot - .WillOnce(Return(true)) // temp - .WillOnce(Return(true)) // data + .WillOnce(Return(true)) // raft_snapshot + .WillOnce(Return(true)) // temp + .WillOnce(Return(true)) // data .WillOnce(Return(false)); std::string trashedCopysetDir = "/trash_test0/copysets/4294967495"; @@ -695,21 +693,21 @@ TEST_F(TrashTest, test_chunk_num_statistic) { std::vector raftfiles{RAFT_DATA_DIR, RAFT_LOG_DIR}; // DirExists - using item4dirExists = struct{ + using item4dirExists = struct { std::string subdir; bool exist; }; std::vector action4DirExists{ - { "", true }, - { "/4294967493.55555", true }, - { "/4294967493.55555/data", true }, - { "/4294967493.55555/log", true }, - { "/4294967493.55555/data/chunk_100", false }, - { "/4294967493.55555/data/chunk_101", false }, - { "/4294967493.55555/log/curve_log_10086_10087", false }, - { "/4294967493.55555/log/curve_log_inprogress_10088", false }, - { "/4294967493.55555/log/log_10083_10084", false }, - { 
"/4294967493.55555/log/log_inprogress_10085", false }, + {"", true}, + {"/4294967493.55555", true}, + {"/4294967493.55555/data", true}, + {"/4294967493.55555/log", true}, + {"/4294967493.55555/data/chunk_100", false}, + {"/4294967493.55555/data/chunk_101", false}, + {"/4294967493.55555/log/curve_log_10086_10087", false}, + {"/4294967493.55555/log/curve_log_inprogress_10088", false}, + {"/4294967493.55555/log/log_10083_10084", false}, + {"/4294967493.55555/log/log_inprogress_10085", false}, }; for (auto& it : action4DirExists) { @@ -719,10 +717,10 @@ TEST_F(TrashTest, test_chunk_num_statistic) { // List std::vector action4List2{ - { "", copysets }, - { "/4294967493.55555", raftfiles }, - { "/4294967493.55555/data", chunks1 }, - { "/4294967493.55555/log", logfiles1 }, + {"", copysets}, + {"/4294967493.55555", raftfiles}, + {"/4294967493.55555/data", chunks1}, + {"/4294967493.55555/log", logfiles1}, }; for (auto& it : action4List2) { @@ -735,16 +733,16 @@ TEST_F(TrashTest, test_chunk_num_statistic) { SetCopysetNeedDelete(trashPath + "/" + copysets[2], notNeedDelete); // RecycleFile - using item4CycleFile = struct{ + using item4CycleFile = struct { std::shared_ptr pool; std::string subdir; int ret; }; std::vector action4CycleFile{ - { pool, "/4294967493.55555/data/chunk_100", 0 }, - { pool, "/4294967493.55555/data/chunk_101", -1 }, - { walPool, "/4294967493.55555/log/curve_log_10086_10087", 0 }, - { walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1 }, + {pool, "/4294967493.55555/data/chunk_100", 0}, + {pool, "/4294967493.55555/data/chunk_101", -1}, + {walPool, "/4294967493.55555/log/curve_log_10086_10087", 0}, + {walPool, "/4294967493.55555/log/curve_log_inprogress_10088", -1}, }; for (auto& it : action4CycleFile) { diff --git a/test/client/client_metric_test.cpp b/test/client/client_metric_test.cpp index 2f092fc79f..4072bd60f4 100644 --- a/test/client/client_metric_test.cpp +++ b/test/client/client_metric_test.cpp @@ -20,37 +20,38 @@ * Author: 
tongguangxun */ -#include +#include "src/client/client_metric.h" + #include #include +#include -#include // NOLINT -#include // NOLINT -#include // NOLINT -#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT -#include "proto/nameserver2.pb.h" #include "include/client/libcurve.h" -#include "src/client/client_metric.h" -#include "src/client/file_instance.h" -#include "test/client/fake/mock_schedule.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" +#include "proto/nameserver2.pb.h" #include "src/client/client_common.h" #include "src/client/client_config.h" +#include "src/client/file_instance.h" +#include "src/client/libcurve_file.h" +#include "test/client/fake/fakeMDS.h" +#include "test/client/fake/mock_schedule.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" DECLARE_string(chunkserver_list); -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9150"; // NOLINT namespace curve { namespace client { -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9150"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -64,7 +65,7 @@ const std::vector clientConf { }; TEST(MetricTest, ChunkServer_MetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -72,25 +73,26 @@ TEST(MetricTest, ChunkServer_MetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = 
"127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT std::string configpath("./test/client/configs/client_metric.conf"); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); ClientConfig cc; ASSERT_EQ(0, cc.Init(configpath.c_str())); - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -147,13 +149,13 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(8000); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -165,8 +167,8 @@ TEST(MetricTest, ChunkServer_MetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(-2, ret); - - // 4次正确读写,4次超时读写,超时会引起重试,重试次数为3,数据量最大是8192 + // 4 correct reads and writes, 4 timeout reads and writes, timeout will + // cause retries, retry count is 3, and the maximum data volume is 8192 ASSERT_EQ(fm->inflightRPCNum.get_value(), 0); ASSERT_EQ(fm->userRead.qps.count.get_value(), 2); ASSERT_EQ(fm->userWrite.qps.count.get_value(), 2); @@ -204,7 +206,7 @@ void cb(CurveAioContext* ctx) { } // namespace TEST(MetricTest, SlowRequestMetricTest) { - MetaServerOption metaopt; + MetaServerOption metaopt; metaopt.rpcRetryOpt.addrs.push_back(mdsMetaServerAddr); metaopt.rpcRetryOpt.rpcTimeoutMs = 500; metaopt.rpcRetryOpt.rpcRetryIntervalUS = 200; @@ -212,16 +214,17 @@ TEST(MetricTest, 
SlowRequestMetricTest) { std::shared_ptr mdsclient = std::make_shared(); ASSERT_EQ(0, mdsclient->Initialize(metaopt)); - FLAGS_chunkserver_list = "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT + FLAGS_chunkserver_list = + "127.0.0.1:9130:0,127.0.0.1:9131:0,127.0.0.1:9132:0"; // NOLINT - // filename必须是全路径 + // The filename must be a full path std::string filename = "/1_userinfo_"; // init mds service FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid EndPoint ep; butil::str2endpoint("127.0.0.1", 9130, &ep); PeerId pd(ep); @@ -267,13 +270,13 @@ TEST(MetricTest, SlowRequestMetricTest) { ret = fi.Read(buffer, 0, 4096); ASSERT_EQ(4096, ret); - // 先睡眠,确保采样 + // Sleep first to ensure sampling std::this_thread::sleep_for(std::chrono::seconds(2)); ASSERT_GT(fm->writeRPC.latency.max_latency(), 0); ASSERT_GT(fm->readRPC.latency.max_latency(), 0); - // read write超时重试 + // Read write timeout retry mds.EnableNetUnstable(100); ret = fi.Write(buffer, 0, 4096); ASSERT_EQ(-2, ret); @@ -383,5 +386,5 @@ TEST(MetricTest, MetricHelperTest) { ASSERT_NO_THROW(MetricHelper::IncremSlowRequestNum(nullptr)); } -} // namespace client -} // namespace curve +} // namespace client +} // namespace curve diff --git a/test/client/client_session_unittest.cpp b/test/client/client_session_unittest.cpp index 5606204b83..680d80ce93 100644 --- a/test/client/client_session_unittest.cpp +++ b/test/client/client_session_unittest.cpp @@ -20,30 +20,29 @@ * Author: tongguangxun */ -#include -#include +#include +#include #include #include -#include +#include +#include +#include #include #include -#include -#include - -#include // NOLINT -#include // NOLINT #include +#include // NOLINT +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "src/client/client_config.h" -#include "test/client/fake/fakeMDS.h" #include "src/client/file_instance.h" #include "src/client/iomanager4file.h" #include "src/client/libcurve_file.h" 
#include "test/client/fake/fakeChunkserver.h" +#include "test/client/fake/fakeMDS.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -72,7 +71,7 @@ void sessioncallback(CurveAioContext* aioctx) { TEST(ClientSession, LeaseTaskTest) { FLAGS_chunkserver_list = - "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; + "127.0.0.1:9176:0,127.0.0.1:9177:0,127.0.0.1:9178:0"; std::string filename = "/1"; @@ -80,7 +79,7 @@ TEST(ClientSession, LeaseTaskTest) { FakeMDS mds(filename); mds.Initialize(); mds.StartService(); - // 设置leaderid + // Set leaderid curve::client::EndPoint ep; butil::str2endpoint("127.0.0.1", 9176, &ep); PeerId pd(ep); @@ -104,7 +103,7 @@ TEST(ClientSession, LeaseTaskTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -117,8 +116,8 @@ TEST(ClientSession, LeaseTaskTest) { openresponse.set_allocated_protosession(se); openresponse.set_allocated_fileinfo(finfo); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice->SetOpenFile(openfakeret); // 2. 
set refresh response @@ -129,7 +128,7 @@ TEST(ClientSession, LeaseTaskTest) { std::unique_lock lk(mtx); refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; info->set_filename(filename); info->set_seqnum(2); info->set_id(1); @@ -143,8 +142,8 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); refreshresp.set_sessionid("1234"); refreshresp.set_allocated_fileinfo(info); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice->SetRefreshSession(refreshfakeret, refresht); // 3. open the file @@ -253,10 +252,9 @@ TEST(ClientSession, LeaseTaskTest) { refreshresp.set_allocated_fileinfo(newFileInfo); refreshresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* refreshFakeRetWithNewInodeId = new FakeReturn( - nullptr, static_cast(&refreshresp)); - curvefsservice->SetRefreshSession( - refreshFakeRetWithNewInodeId, refresht); + FakeReturn* refreshFakeRetWithNewInodeId = + new FakeReturn(nullptr, static_cast(&refreshresp)); + curvefsservice->SetRefreshSession(refreshFakeRetWithNewInodeId, refresht); { std::unique_lock lk(mtx); @@ -302,8 +300,8 @@ TEST(ClientSession, LeaseTaskTest) { // 11. 
set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice->SetCloseFile(closefileret); LOG(INFO) << "uninit fileinstance"; @@ -321,12 +319,12 @@ TEST(ClientSession, LeaseTaskTest) { } // namespace client } // namespace curve -std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT -uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT -uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT -std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT +std::string mdsMetaServerAddr = "127.0.0.1:9101"; // NOLINT +uint32_t segment_size = 1 * 1024 * 1024 * 1024ul; // NOLINT +uint32_t chunk_size = 4 * 1024 * 1024; // NOLINT +std::string configpath = "./test/client/configs/client_session.conf"; // NOLINT -const std::vector clientConf { +const std::vector clientConf{ std::string("mds.listen.addr=127.0.0.1:9101,127.0.0.1:9102"), std::string("global.logPath=./runlog/"), std::string("chunkserver.rpcTimeoutMS=1000"), @@ -337,18 +335,17 @@ const std::vector clientConf { std::string("metacache.rpcRetryIntervalUS=500"), std::string("mds.rpcRetryIntervalUS=500"), std::string("schedule.threadpoolSize=2"), - std::string("mds.maxRetryMS=5000") -}; + std::string("mds.maxRetryMS=5000")}; -int main(int argc, char ** argv) { +int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); ::testing::InitGoogleMock(&argc, argv); google::ParseCommandLineFlags(&argc, &argv, false); curve::CurveCluster* cluster = new curve::CurveCluster(); - cluster->PrepareConfig( - configpath, clientConf); + cluster->PrepareConfig(configpath, + clientConf); int ret = RUN_ALL_TESTS(); return ret; diff --git a/test/client/client_unstable_helper_test.cpp b/test/client/client_unstable_helper_test.cpp index cbb62891a5..4ef1c6487c 100644 --- 
a/test/client/client_unstable_helper_test.cpp +++ b/test/client/client_unstable_helper_test.cpp @@ -20,10 +20,11 @@ * Author: wuhanqing */ -#include -#include -#include #include +#include +#include +#include + #include #include "src/client/unstable_helper.h" @@ -48,50 +49,51 @@ TEST(UnstableHelperTest, normal_test) { chunkservers.emplace_back(std::make_pair(i, ep)); } - // 先对每个chunkserver进行10次连续超时 + // First, perform 10 consecutive timeouts on each chunkserver for (const auto& cs : chunkservers) { for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(cs.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - cs.first, cs.second)); + helper.GetCurrentUnstableState(cs.first, cs.second)); } } - // 再对每个chunkserver增加一次超时 - // 前两个是chunkserver unstable状态,第三个是server unstable + // Add another timeout to each chunkserver + // The first two are in the chunkserver unstable state, and the third is in + // the server unstable state helper.IncreTimeout(chunkservers[0].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); ASSERT_EQ(UnstableState::ServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 继续增加超时次数 - // 这种情况下,每次都是chunkserver unstable + // Continue to increase the number of timeouts + // In this case, it is always chunkserver unstable helper.IncreTimeout(chunkservers[0].first); 
ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[0].first, chunkservers[0].second)); + helper.GetCurrentUnstableState(chunkservers[0].first, + chunkservers[0].second)); helper.IncreTimeout(chunkservers[1].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[1].first, chunkservers[1].second)); + helper.GetCurrentUnstableState(chunkservers[1].first, + chunkservers[1].second)); helper.IncreTimeout(chunkservers[2].first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkservers[2].first, chunkservers[2].second)); + helper.GetCurrentUnstableState(chunkservers[2].first, + chunkservers[2].second)); - // 新chunkserver第一次超时,根据ip判断,可以直接设置为chunkserver unstable + // The first timeout of a new chunkserver can be directly set to chunkserver + // unstable based on the IP address butil::EndPoint ep; butil::str2endpoint("127.100.0.1:60999", &ep); auto chunkserver4 = std::make_pair(4, ep); @@ -99,22 +101,22 @@ TEST(UnstableHelperTest, normal_test) { helper.IncreTimeout(chunkserver4.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver4.first, chunkserver4.second)); + helper.GetCurrentUnstableState(chunkserver4.first, + chunkserver4.second)); - // 其他ip的chunkserver + // Chunkservers for other IPs butil::str2endpoint("127.200.0.1:60999", &ep); auto chunkserver5 = std::make_pair(5, ep); for (int i = 1; i <= opt.maxStableChunkServerTimeoutTimes; ++i) { helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::NoUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + chunkserver5.second)); } helper.IncreTimeout(chunkserver5.first); ASSERT_EQ(UnstableState::ChunkServerUnstable, - helper.GetCurrentUnstableState( - chunkserver5.first, chunkserver5.second)); + helper.GetCurrentUnstableState(chunkserver5.first, + 
chunkserver5.second)); } } // namespace client diff --git a/test/client/client_userinfo_unittest.cpp b/test/client/client_userinfo_unittest.cpp index 6153f23e5e..442af59c6f 100644 --- a/test/client/client_userinfo_unittest.cpp +++ b/test/client/client_userinfo_unittest.cpp @@ -20,23 +20,23 @@ * Author: tongguangxun */ -#include +#include #include #include -#include +#include -#include // NOLINT #include +#include // NOLINT #include -#include // NOLINT +#include // NOLINT #include #include "include/client/libcurve.h" #include "src/client/client_common.h" -#include "test/client/fake/fakeMDS.h" -#include "src/client/libcurve_file.h" #include "src/client/iomanager4chunk.h" +#include "src/client/libcurve_file.h" #include "src/client/libcurve_snapshot.h" +#include "test/client/fake/fakeMDS.h" extern std::string mdsMetaServerAddr; extern std::string configpath; @@ -70,8 +70,8 @@ class CurveClientUserAuthFail : public ::testing::Test { ASSERT_EQ(0, server.Join()); } - brpc::Server server; - MetaServerOption metaopt; + brpc::Server server; + MetaServerOption metaopt; FakeMDSCurveFSService curvefsservice; FakeMDSTopologyService topologyservice; }; @@ -102,7 +102,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // set openfile response ::curve::mds::OpenFileResponse openresponse; - curve::mds::FileInfo * finfo = new curve::mds::FileInfo; + curve::mds::FileInfo* finfo = new curve::mds::FileInfo; ::curve::mds::ProtoSession* se = new ::curve::mds::ProtoSession; se->set_sessionid("1"); se->set_createtime(12345); @@ -115,16 +115,16 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { openresponse.mutable_fileinfo()->set_seqnum(2); openresponse.mutable_fileinfo()->set_filename(filename); - FakeReturn* openfakeret - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret); // 1. 
create a File authfailed ::curve::mds::CreateFileResponse response; response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateFileFakeReturn(fakeret); size_t len = 4 * 1024 * 1024ul; @@ -138,7 +138,7 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { LOG(INFO) << "get refresh session request!"; refreshcv.notify_one(); }; - curve::mds::FileInfo * info = new curve::mds::FileInfo; + curve::mds::FileInfo* info = new curve::mds::FileInfo; ::curve::mds::ReFreshSessionResponse refreshresp; refreshresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); refreshresp.set_sessionid("1234"); @@ -147,12 +147,13 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { refreshresp.mutable_fileinfo()->set_filename(filename); refreshresp.mutable_fileinfo()->set_id(1); refreshresp.mutable_fileinfo()->set_parentid(0); - refreshresp.mutable_fileinfo()->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + refreshresp.mutable_fileinfo()->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT refreshresp.mutable_fileinfo()->set_chunksize(4 * 1024 * 1024); refreshresp.mutable_fileinfo()->set_length(4 * 1024 * 1024 * 1024ul); refreshresp.mutable_fileinfo()->set_ctime(12345678); - FakeReturn* refreshfakeret - = new FakeReturn(nullptr, static_cast(&refreshresp)); + FakeReturn* refreshfakeret = + new FakeReturn(nullptr, static_cast(&refreshresp)); curvefsservice.SetRefreshSession(refreshfakeret, refresht); // 3. open the file auth failed @@ -161,47 +162,47 @@ TEST_F(CurveClientUserAuthFail, CurveClientUserAuthFailTest) { // 4. 
open file success openresponse.set_statuscode(::curve::mds::StatusCode::kOK); - FakeReturn* openfakeret2 - = new FakeReturn(nullptr, static_cast(&openresponse)); + FakeReturn* openfakeret2 = + new FakeReturn(nullptr, static_cast(&openresponse)); curvefsservice.SetOpenFile(openfakeret2); openret = fileinstance.Open(); ASSERT_EQ(openret, LIBCURVE_ERROR::OK); -/* - // 5. wait for refresh - for (int i = 0; i < 4; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + /* + // 5. wait for refresh + for (int i = 0; i < 4; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - CurveAioContext aioctx; - aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; - aioctx.length = 4 * 1024 * 1024 + 8 * 1024; - aioctx.ret = LIBCURVE_ERROR::OK; - aioctx.cb = sessioncallback; - aioctx.buf = nullptr; - - fileinstance.AioRead(&aioctx); - fileinstance.AioWrite(&aioctx); - - for (int i = 0; i < 1; i++) { - { - std::unique_lock lk(mtx); - refreshcv.wait(lk); + CurveAioContext aioctx; + aioctx.offset = 4 * 1024 * 1024 - 4 * 1024; + aioctx.length = 4 * 1024 * 1024 + 8 * 1024; + aioctx.ret = LIBCURVE_ERROR::OK; + aioctx.cb = sessioncallback; + aioctx.buf = nullptr; + + fileinstance.AioRead(&aioctx); + fileinstance.AioWrite(&aioctx); + + for (int i = 0; i < 1; i++) { + { + std::unique_lock lk(mtx); + refreshcv.wait(lk); + } } - } - char buffer[10]; - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); - ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); -*/ + char buffer[10]; + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Write(buffer, 0, 0)); + ASSERT_EQ(-LIBCURVE_ERROR::DISABLEIO, fileinstance.Read(buffer, 0, 0)); + */ // 6. 
set fake close return ::curve::mds::CloseFileResponse closeresp; closeresp.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* closefileret - = new FakeReturn(nullptr, static_cast(&closeresp)); + FakeReturn* closefileret = + new FakeReturn(nullptr, static_cast(&closeresp)); curvefsservice.SetCloseFile(closefileret); ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, fileinstance.Close()); @@ -235,12 +236,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -255,54 +255,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - emptyuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, emptyuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); 
curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - emptyuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, emptyuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new curve::mds::GetOrAllocateSegmentResponse(); + new curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16 * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - emptyuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, emptyuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete test @@ -311,7 +308,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); 
listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -319,20 +317,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - emptyuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, emptyuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); + cl.ListSnapShot(filename, emptyuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; @@ -341,7 +338,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientUserAuthFailTest) { delete delfakeret; } -// root user测试 +// Root user testing TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ClientConfigOption opt; opt.metaServerOpt.rpcRetryOpt.rpcTimeoutMs = 500; @@ -359,7 +356,7 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ASSERT_TRUE(!cl.Init(opt)); UserInfo_t rootuserinfo; - rootuserinfo.owner ="root"; + rootuserinfo.owner = "root"; rootuserinfo.password = "123"; std::string filename = 
"./1_usertest_.img"; @@ -370,12 +367,11 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { ::curve::mds::CreateSnapShotResponse response; response.set_statuscode(::curve::mds::StatusCode::kOK); response.clear_snapshotfileinfo(); - FakeReturn* fakeret - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret); - ASSERT_EQ(-LIBCURVE_ERROR::FAILED, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::FAILED, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // set response response.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); @@ -390,54 +386,51 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { finf->set_seqnum(2); finf->set_segmentsize(1 * 1024 * 1024 * 1024); response.set_allocated_snapshotfileinfo(finf); - FakeReturn* fakeret1 - = new FakeReturn(nullptr, static_cast(&response)); + FakeReturn* fakeret1 = + new FakeReturn(nullptr, static_cast(&response)); curvefsservice.SetCreateSnapShot(fakeret1); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.CreateSnapShot(filename, - rootuserinfo, - &seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.CreateSnapShot(filename, rootuserinfo, &seq)); // test delete // normal delete test ::curve::mds::DeleteSnapShotResponse delresponse; delresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* delfakeret - = new FakeReturn(nullptr, static_cast(&delresponse)); + FakeReturn* delfakeret = + new FakeReturn(nullptr, static_cast(&delresponse)); curvefsservice.SetDeleteSnapShot(delfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.DeleteSnapShot(filename, - rootuserinfo, - seq)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.DeleteSnapShot(filename, rootuserinfo, seq)); // test get SegmentInfo // normal getinfo curve::mds::GetOrAllocateSegmentResponse* getresponse = - new curve::mds::GetOrAllocateSegmentResponse(); + new 
curve::mds::GetOrAllocateSegmentResponse(); curve::mds::PageFileSegment* pfs = new curve::mds::PageFileSegment; pfs->set_logicalpoolid(0); - pfs->set_segmentsize(1ull*1024*1024*1024); - pfs->set_chunksize(16ull*1024*1024); + pfs->set_segmentsize(1ull * 1024 * 1024 * 1024); + pfs->set_chunksize(16ull * 1024 * 1024); pfs->set_startoffset(0); getresponse->set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); getresponse->set_allocated_pagefilesegment(pfs); - FakeReturn* getfakeret = new FakeReturn(nullptr, - static_cast(getresponse)); + FakeReturn* getfakeret = + new FakeReturn(nullptr, static_cast(getresponse)); curvefsservice.SetGetSnapshotSegmentInfo(getfakeret); ::curve::mds::topology::GetChunkServerListInCopySetsResponse* geresponse_1 = - new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); + new ::curve::mds::topology::GetChunkServerListInCopySetsResponse(); geresponse_1->set_statuscode(0); - FakeReturn* faktopologyeret = new FakeReturn(nullptr, - static_cast(geresponse_1)); + FakeReturn* faktopologyeret = + new FakeReturn(nullptr, static_cast(geresponse_1)); topologyservice.SetFakeReturn(faktopologyeret); SegmentInfo seginfo; LogicalPoolCopysetIDInfo lpcsIDInfo; - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.GetSnapshotSegmentInfo(filename, - rootuserinfo, - 0, 0, &seginfo)); + ASSERT_EQ( + -LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapshotSegmentInfo(filename, rootuserinfo, 0, 0, &seginfo)); // test list snapshot // normal delete test @@ -446,7 +439,8 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_filename(filename); listresponse.mutable_fileinfo(0)->set_id(1); listresponse.mutable_fileinfo(0)->set_parentid(0); - listresponse.mutable_fileinfo(0)->set_filetype(curve::mds::FileType::INODE_PAGEFILE); // NOLINT + listresponse.mutable_fileinfo(0)->set_filetype( + curve::mds::FileType::INODE_PAGEFILE); // NOLINT listresponse.mutable_fileinfo(0)->set_chunksize(4 * 1024 * 1024); 
listresponse.mutable_fileinfo(0)->set_length(4 * 1024 * 1024 * 1024ul); listresponse.mutable_fileinfo(0)->set_ctime(12345678); @@ -454,21 +448,19 @@ TEST_F(CurveClientUserAuthFail, CurveSnapClientRootUserAuthTest) { listresponse.mutable_fileinfo(0)->set_segmentsize(1 * 1024 * 1024 * 1024ul); listresponse.set_statuscode(::curve::mds::StatusCode::kOwnerAuthFail); - FakeReturn* listfakeret - = new FakeReturn(nullptr, static_cast(&listresponse)); + FakeReturn* listfakeret = + new FakeReturn(nullptr, static_cast(&listresponse)); curve::client::FInfo_t sinfo; curvefsservice.SetListSnapShot(listfakeret); - ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, cl.GetSnapShot(filename, - rootuserinfo, - seq, &sinfo)); + ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, + cl.GetSnapShot(filename, rootuserinfo, seq, &sinfo)); std::vector seqvec; std::map fivec; seqvec.push_back(seq); curve::client::FInfo_t ffinfo; ASSERT_EQ(-LIBCURVE_ERROR::AUTHFAIL, - cl.ListSnapShot(filename, rootuserinfo, - &seqvec, &fivec)); + cl.ListSnapShot(filename, rootuserinfo, &seqvec, &fivec)); cl.UnInit(); delete fakeret; diff --git a/test/common/task_thread_pool_test.cpp b/test/common/task_thread_pool_test.cpp index cb44a36b09..ac658e9a90 100644 --- a/test/common/task_thread_pool_test.cpp +++ b/test/common/task_thread_pool_test.cpp @@ -20,26 +20,27 @@ * Author: wudemiao */ +#include "src/common/concurrent/task_thread_pool.h" + #include -#include #include +#include #include "src/common/concurrent/count_down_event.h" -#include "src/common/concurrent/task_thread_pool.h" namespace curve { namespace common { using curve::common::CountDownEvent; -void TestAdd1(int a, double b, CountDownEvent *cond) { +void TestAdd1(int a, double b, CountDownEvent* cond) { double c = a + b; (void)c; cond->Signal(); } -int TestAdd2(int a, double b, CountDownEvent *cond) { +int TestAdd2(int a, double b, CountDownEvent* cond) { double c = a + b; (void)c; cond->Signal(); @@ -47,7 +48,7 @@ int TestAdd2(int a, double b, CountDownEvent *cond) { } 
TEST(TaskThreadPool, basic) { - /* 测试线程池 start 入参 */ + /* Test thread pool start input parameter */ { TaskThreadPool<> taskThreadPool; ASSERT_EQ(-1, taskThreadPool.Start(2, 0)); @@ -74,7 +75,7 @@ TEST(TaskThreadPool, basic) { } { - /* 测试不设置,此时为 INT_MAX */ + /* Test not set, at this time it is INT_MAX */ TaskThreadPool<> taskThreadPool; ASSERT_EQ(0, taskThreadPool.Start(4)); ASSERT_EQ(INT_MAX, taskThreadPool.QueueCapacity()); @@ -92,7 +93,7 @@ TEST(TaskThreadPool, basic) { CountDownEvent cond1(1); taskThreadPool.Enqueue(TestAdd1, 1, 1.234, &cond1); cond1.Wait(); - /* TestAdd2 是有返回值的 function */ + /* TestAdd2 is a function with a return value */ CountDownEvent cond2(1); taskThreadPool.Enqueue(TestAdd2, 1, 1.234, &cond2); cond2.Wait(); @@ -100,7 +101,7 @@ TEST(TaskThreadPool, basic) { taskThreadPool.Stop(); } - /* 基本运行 task 测试 */ + /* Basic task testing */ { std::atomic runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -133,14 +134,14 @@ TEST(TaskThreadPool, basic) { t2.join(); t3.join(); - /* 等待所有 task 执行完毕 */ + /* Wait for all tasks to complete execution */ cond.Wait(); ASSERT_EQ(3 * kMaxLoop, runTaskCount.load(std::memory_order_acquire)); taskThreadPool.Stop(); } - /* 测试队列满了,push会阻塞 */ + /* The test queue is full, push will block */ { std::atomic runTaskCount; runTaskCount.store(0, std::memory_order_release); @@ -157,8 +158,7 @@ TEST(TaskThreadPool, basic) { CountDownEvent cond4(1); CountDownEvent startRunCond4(1); - auto waitTask = [&](CountDownEvent* sigCond, - CountDownEvent* waitCond) { + auto waitTask = [&](CountDownEvent* sigCond, CountDownEvent* waitCond) { sigCond->Signal(); waitCond->Wait(); runTaskCount.fetch_add(1, std::memory_order_acq_rel); @@ -169,12 +169,15 @@ TEST(TaskThreadPool, basic) { ASSERT_EQ(kQueueCapacity, taskThreadPool.QueueCapacity()); ASSERT_EQ(kThreadNums, taskThreadPool.ThreadOfNums()); - /* 把线程池的所有处理线程都卡住了 */ + /* Stuck all processing threads in the thread pool */ taskThreadPool.Enqueue(waitTask, &startRunCond1, 
&cond1); taskThreadPool.Enqueue(waitTask, &startRunCond2, &cond2); taskThreadPool.Enqueue(waitTask, &startRunCond3, &cond3); taskThreadPool.Enqueue(waitTask, &startRunCond4, &cond4); - /* 等待 waitTask1、waitTask2、waitTask3、waitTask4 都开始运行 */ + /* + * Wait for waitTask1, waitTask2, waitTask3, and waitTask4 + * to start running + */ startRunCond1.Wait(); startRunCond2.Wait(); startRunCond3.Wait(); @@ -186,7 +189,7 @@ TEST(TaskThreadPool, basic) { runTaskCount.fetch_add(1, std::memory_order_acq_rel); }; - /* 记录线程 push 到线程池 queue 的 task 数量 */ + /* Record the number of tasks from thread push to thread pool queue */ std::atomic pushTaskCount1; std::atomic pushTaskCount2; std::atomic pushTaskCount3; @@ -208,7 +211,7 @@ std::thread t2(std::bind(threadFunc, &pushTaskCount2)); std::thread t3(std::bind(threadFunc, &pushTaskCount3)); - /* 等待线程池 queue 被 push 满 */ + /* Waiting for thread pool queue to be pushed full */ int pushTaskCount; while (true) { ::usleep(50); @@ -222,32 +225,33 @@ } } - /* push 进去的 task 都没有被执行 */ + /* The tasks that were pushed in were not executed */ ASSERT_EQ(0, runTaskCount.load(std::memory_order_acquire)); /** - * 此时,thread pool 的 queue 肯定 push 满了,且 push - * 满了之后就没法再 push 了 + * At this point, the thread pool queue must be full, and once it + * is full, no more tasks can be pushed */ ASSERT_EQ(pushTaskCount, taskThreadPool.QueueCapacity()); ASSERT_EQ(taskThreadPool.QueueCapacity(), taskThreadPool.QueueSize()); - /* 将线程池中的线程都唤醒 */ + /* Wake up all threads in the thread pool */ cond1.Signal(); cond2.Signal(); cond3.Signal(); cond4.Signal(); - /* 等待所有 task 执行完成 */ + /* Wait for all task executions to complete */ while (true) { ::usleep(10); - if (runTaskCount.load(std::memory_order_acquire) - >= 4 + 3 * kMaxLoop) { + if (runTaskCount.load(std::memory_order_acquire) >= + 4 + 3 * kMaxLoop) { break; } } /** - * 等待所有的 push thread 退出,这样才能保证 pushThreadCount 计数更新了 + * Wait for all push threads 
to exit so that + * the pushThreadCount count is updated */ pushThreadCond.Wait(); diff --git a/test/integration/client/unstable_chunkserver_exception_test.cpp b/test/integration/client/unstable_chunkserver_exception_test.cpp index ea5c7e4c37..3f10f365ae 100644 --- a/test/integration/client/unstable_chunkserver_exception_test.cpp +++ b/test/integration/client/unstable_chunkserver_exception_test.cpp @@ -20,26 +20,26 @@ * Author: wuhanqing */ -#include -#include #include +#include +#include -#include -#include -#include // NOLINT -#include // NOLINT +#include #include -#include +#include +#include // NOLINT +#include #include -#include +#include // NOLINT #include -#include -#include // NOLINT +#include +#include // NOLINT +#include #include "include/client/libcurve.h" -#include "src/common/timeutility.h" #include "src/client/client_metric.h" #include "src/client/inflight_controller.h" +#include "src/common/timeutility.h" #include "test/integration/client/common/file_operation.h" #include "test/integration/cluster_common/cluster.h" #include "test/util/config_generator.h" @@ -58,16 +58,14 @@ const char* kLogPath = "./runlog/"; curve::client::PerSecondMetric iops("test", "iops"); -std::atomic running{ false }; +std::atomic running{false}; const std::vector chunkserverConfigOpts{ "chunkfilepool.enable_get_chunk_from_pool=false", - "walfilepool.enable_get_segment_from_pool=false" -}; + "walfilepool.enable_get_segment_from_pool=false"}; -const std::vector mdsConfigOpts{ - std::string("mds.etcd.endpoint=") + std::string(kEtcdClientIpPort) -}; +const std::vector mdsConfigOpts{std::string("mds.etcd.endpoint=") + + std::string(kEtcdClientIpPort)}; const std::vector clientConfigOpts{ std::string("mds.listen.addr=") + kMdsIpPort, @@ -81,9 +79,8 @@ const std::vector mdsConf{ std::string("--confPath=") + kMdsConfPath, std::string("--mdsAddr=") + kMdsIpPort, std::string("--etcdAddr=") + kEtcdClientIpPort, - { "--log_dir=./runlog/mds" }, - { "--stderrthreshold=3" } -}; + 
{"--log_dir=./runlog/mds"}, + {"--stderrthreshold=3"}}; const std::vector chunkserverConfTemplate{ {"-raft_sync_segments=true"}, @@ -138,20 +135,16 @@ std::vector GenChunkserverConf(int port) { return conf; } -off_t RandomWriteOffset() { - return rand() % 32 * (16 * 1024 * 1024); -} +off_t RandomWriteOffset() { return rand() % 32 * (16 * 1024 * 1024); } -size_t RandomWriteLength() { - return rand() % 32 * 4096; -} +size_t RandomWriteLength() { return rand() % 32 * 4096; } static char buffer[1024 * 4096]; struct ChunkserverParam { int id; int port; - std::string addr{ "127.0.0.1:" }; + std::string addr{"127.0.0.1:"}; std::vector conf; ChunkserverParam(int id, int port) { @@ -165,7 +158,7 @@ struct ChunkserverParam { class UnstableCSModuleException : public ::testing::Test { protected: static void SetUpTestCase() { - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf ttt"); system("mkdir -p ttt"); @@ -175,7 +168,7 @@ class UnstableCSModuleException : public ::testing::Test { cluster.reset(new CurveCluster()); ASSERT_NE(nullptr, cluster.get()); - // 生成配置文件 + // Generate Configuration File cluster->PrepareConfig(kMdsConfPath, mdsConfigOpts); cluster->PrepareConfig(kCSConfPath, @@ -183,50 +176,52 @@ class UnstableCSModuleException : public ::testing::Test { cluster->PrepareConfig(kClientConfPath, clientConfigOpts); - // 1. 启动etcd + // 1. Start etcd pid_t pid = cluster->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, std::vector{ - "--name=module_exception_curve_unstable_cs" }); + "--name=module_exception_curve_unstable_cs"}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << ":" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 2. 启动一个mds + // 2. Start an mds pid = cluster->StartSingleMDS(1, kMdsIpPort, 30013, mdsConf, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); std::this_thread::sleep_for(std::chrono::seconds(2)); - // 3. 
创建物理池 - ASSERT_EQ( - 0, - cluster->PreparePhysicalPool( - 1, - "./test/integration/client/config/unstable/" - "topo_unstable.json")); + // 3. Creating a physical pool + ASSERT_EQ(0, cluster->PreparePhysicalPool( + 1, + "./test/integration/client/config/unstable/" + "topo_unstable.json")); - // 4. 创建chunkserver + // 4. Create chunkserver StartAllChunkserver(); std::this_thread::sleep_for(std::chrono::seconds(5)); - // 5. 创建逻辑池,并睡眠一段时间让底层copyset先选主 - ASSERT_EQ(0, cluster->PrepareLogicalPool( - 1, "test/integration/client/config/unstable/topo_unstable.json")); + // 5. Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first + ASSERT_EQ( + 0, + cluster->PrepareLogicalPool( + 1, + "test/integration/client/config/unstable/topo_unstable.json")); std::this_thread::sleep_for(std::chrono::seconds(10)); - // 6. 初始化client配置 + // 6. Initialize client configuration int ret = Init(kClientConfPath); ASSERT_EQ(ret, 0); - // 7. 先睡眠10s,让chunkserver选出leader + // 7. 
Sleep for 10 seconds first and let chunkserver select the leader std::this_thread::sleep_for(std::chrono::seconds(10)); } static void TearDownTestCase() { UnInit(); ASSERT_EQ(0, cluster->StopCluster()); - // 清理文件夹 + // Clean Up Folder system("rm -rf module_exception_curve_unstable_cs.etcd"); system("rm -rf module_exception_curve_unstable_cs"); system("rm -rf ttt"); @@ -300,7 +295,8 @@ class UnstableCSModuleException : public ::testing::Test { int UnstableCSModuleException::fd = 0; std::unique_ptr UnstableCSModuleException::cluster; -std::unordered_map UnstableCSModuleException::chunkServers; // NOLINT +std::unordered_map + UnstableCSModuleException::chunkServers; // NOLINT TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { const std::string filename = "/TestCommonReadAndWrite"; @@ -323,15 +319,15 @@ TEST_F(UnstableCSModuleException, TestCommonReadAndWrite) { ::Close(fd); } -// 集群拓扑结构 -// 1个client -// 1个etcd -// 1个mds -// 3个zone,每个里面2个chunkserver +// Cluster topology +// 1 client +// 1 ETCD +// 1 mds +// 3 zones, each with 2 chunkservers inside TEST_F(UnstableCSModuleException, HangOneZone) { srand(time(nullptr)); - // 开启多个线程写文件 + // Enable multiple threads to write files LOG(INFO) << "starting write..."; running = true; std::vector openAndWriteThreads; @@ -341,7 +337,7 @@ TEST_F(UnstableCSModuleException, HangOneZone) { "/test" + std::to_string(i)); } - // 正常写入60s, 并记录后30秒的iops + // Write normally for 60 seconds and record the IOPS for the next 30 seconds std::vector beforeRecords; std::this_thread::sleep_for(std::chrono::seconds(30)); for (int i = 1; i <= 30; ++i) { @@ -353,18 +349,18 @@ TEST_F(UnstableCSModuleException, HangOneZone) { beforeRecords.size(); LOG(INFO) << "iops before hang: " << beforeAvgIOps; - // hang一个zone的chunkserver + // Hang a chunkserver for a zone LOG(INFO) << "hang one zone"; ASSERT_EQ(0, cluster->HangChunkServer(1)); ASSERT_EQ(0, cluster->HangChunkServer(2)); std::vector afterRecords; - // 打印每一秒的iops情况 + // Print IOPS per second 
for (int i = 1; i <= 10; ++i) { std::this_thread::sleep_for(std::chrono::seconds(1)); auto tmp = iops.value.get_value(1); LOG(INFO) << "after " << i << "s, iops: " << tmp; - // 记录后5s的iops值 + // Record the iops value for 5 seconds after recording if (i >= 5) { afterRecords.push_back(tmp); } diff --git a/test/integration/cluster_common/cluster.cpp b/test/integration/cluster_common/cluster.cpp index 5d32ef8539..72410a5ca7 100644 --- a/test/integration/cluster_common/cluster.cpp +++ b/test/integration/cluster_common/cluster.cpp @@ -20,26 +20,28 @@ * Author: lixiaocui */ -#include +#include "test/integration/cluster_common/cluster.h" + #include -#include -#include -#include #include #include #include -#include -#include //NOLINT +#include +#include +#include +#include + #include //NOLINT +#include #include +#include +#include //NOLINT #include #include -#include -#include "test/integration/cluster_common/cluster.h" +#include "src/client/client_common.h" #include "src/common/string_util.h" #include "src/common/timeutility.h" -#include "src/client/client_common.h" #include "src/kvstorageclient/etcd_client.h" using ::curve::client::UserInfo_t; @@ -50,29 +52,29 @@ namespace curve { using ::curve::client::CreateFileContext; -int CurveCluster::InitMdsClient(const curve::client::MetaServerOption &op) { +int CurveCluster::InitMdsClient(const curve::client::MetaServerOption& op) { mdsClient_ = std::make_shared(); return mdsClient_->Initialize(op); } -std::vector VecStr2VecChar(std::vector args) { - std::vector argv(args.size() + 1); // for the NULL terminator +std::vector VecStr2VecChar(std::vector args) { + std::vector argv(args.size() + 1); // for the NULL terminator for (std::size_t i = 0; i < args.size(); ++i) { // not include cmd - argv[i] = new char[args[i].size()+1]; + argv[i] = new char[args[i].size() + 1]; snprintf(argv[i], args[i].size() + 1, "%s", args[i].c_str()); } argv[args.size()] = NULL; return argv; } -void ClearArgv(const std::vector &argv) { - for (auto const 
&item : argv) { - delete [] item; +void ClearArgv(const std::vector& argv) { + for (auto const& item : argv) { + delete[] item; } } int CurveCluster::InitSnapshotCloneMetaStoreEtcd( - const std::string &etcdEndpoints) { + const std::string& etcdEndpoints) { EtcdConf conf; conf.Endpoints = new char[etcdEndpoints.size()]; std::memcpy(conf.Endpoints, etcdEndpoints.c_str(), etcdEndpoints.size()); @@ -88,8 +90,8 @@ int CurveCluster::InitSnapshotCloneMetaStoreEtcd( } auto codec = std::make_shared(); - metaStore_ = std::make_shared(etcdClient, - codec); + metaStore_ = + std::make_shared(etcdClient, codec); if (metaStore_->Init() < 0) { LOG(ERROR) << "metaStore init fail."; return -1; @@ -106,17 +108,13 @@ int CurveCluster::StopCluster() { LOG(INFO) << "stop cluster begin..."; int ret = 0; - if (StopAllMDS() < 0) - ret = -1; + if (StopAllMDS() < 0) ret = -1; - if (StopAllChunkServer() < 0) - ret = -1; + if (StopAllChunkServer() < 0) ret = -1; - if (StopAllSnapshotCloneServer() < 0) - ret = -1; + if (StopAllSnapshotCloneServer() < 0) ret = -1; - if (StopAllEtcd() < 0) - ret = -1; + if (StopAllEtcd() < 0) ret = -1; if (!ret) LOG(INFO) << "success stop cluster"; @@ -125,9 +123,9 @@ int CurveCluster::StopCluster() { return ret; } -int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, +int CurveCluster::StartSingleMDS(int id, const std::string& ipPort, int dummyPort, - const std::vector &mdsConf, + const std::vector& mdsConf, bool expectLeader) { LOG(INFO) << "start mds " << ipPort << " begin..."; pid_t pid = ::fork(); @@ -135,20 +133,21 @@ int CurveCluster::StartSingleMDS(int id, const std::string &ipPort, LOG(ERROR) << "start mds " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个mds + // Start an mds in a child process // ./bazel-bin/src/mds/main/curvemds std::vector args; args.emplace_back("./bazel-bin/src/mds/main/curvemds"); args.emplace_back("--mdsAddr=" + ipPort); args.emplace_back("--dummyPort=" + std::to_string(dummyPort)); - 
for (auto &item : mdsConf) { + for (auto& item : mdsConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/mds/main/curvemds", argv.data()); ClearArgv(argv); @@ -221,26 +220,27 @@ int CurveCluster::StopAllMDS() { } int CurveCluster::StartSnapshotCloneServer( - int id, const std::string &ipPort, - const std::vector &snapshotcloneConf) { + int id, const std::string& ipPort, + const std::vector& snapshotcloneConf) { LOG(INFO) << "start snapshotcloneserver " << ipPort << " begin ..."; pid_t pid = ::fork(); if (0 > pid) { LOG(ERROR) << "start snapshotcloneserver " << ipPort << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个snapshotcloneserver + // Starting a snapshotcloneserver in a child process std::vector args; args.emplace_back( "./bazel-bin/src/snapshotcloneserver/snapshotcloneserver"); args.emplace_back("--addr=" + ipPort); - for (auto &item : snapshotcloneConf) { + for (auto& item : snapshotcloneConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execv("./bazel-bin/src/snapshotcloneserver/snapshotcloneserver", argv.data()); @@ -317,19 +317,18 @@ int CurveCluster::StopAllSnapshotCloneServer() { int ret = 0; auto tempMap = snapPidMap_; for (auto pair : tempMap) { - if (StopSnapshotCloneServer(pair.first) < 0) - ret = -1; + if (StopSnapshotCloneServer(pair.first) < 0) ret = -1; } - // 等待进程完全退出 + // Wait for the process to completely exit ::sleep(2); LOG(INFO) << "stop all snapshotcloneservver end."; return ret; } -int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector &etcdConf) { +int CurveCluster::StartSingleEtcd(int id, const std::string& clientIpPort, + const std::string& peerIpPort, + const std::vector& etcdConf) { LOG(INFO) << "start etcd " << clientIpPort << " begin..."; pid_t pid = ::fork(); @@ -337,7 +336,7 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, LOG(ERROR) << "start etcd " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个etcd + // Start an ETCD in a child process // ip netns exec integ_etcd1 etcd std::vector args{"etcd"}; args.emplace_back("--listen-peer-urls=http://" + peerIpPort); @@ -348,14 +347,15 @@ int CurveCluster::StartSingleEtcd(int id, const std::string &clientIpPort, args.emplace_back("--initial-cluster-token=etcd-cluster-1"); args.emplace_back("--election-timeout=3000"); args.emplace_back("--heartbeat-interval=300"); - for (auto &item : etcdConf) { + for (auto& item : etcdConf) { args.push_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execvp("etcd", argv.data()); ClearArgv(argv); @@ -380,7 +380,7 @@ bool CurveCluster::WaitForEtcdClusterAvalible(int waitSec) { return false; } else { int i = 0; - for (auto &item : etcdClientIpPort_) { + for (auto& item : etcdClientIpPort_) { i++; if (i == etcdClientIpPort_.size()) { endpoint += "http://" + item.second; @@ -464,9 +464,9 @@ int CurveCluster::StopAllEtcd() { return ret; } -int CurveCluster::FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystempath, +int CurveCluster::FormatFilePool(const std::string& filePooldir, + const std::string& filePoolmetapath, + const std::string& filesystempath, uint32_t size) { LOG(INFO) << "FormatFilePool begin..."; @@ -475,8 +475,7 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, " -filePoolMetaPath=" + filePoolmetapath + " -fileSystemPath=" + filesystempath + " -allocateByPercent=false -preAllocateNum=" + - std::to_string(size * 300) + - " -needWriteZero=false"; + std::to_string(size * 300) + " -needWriteZero=false"; RETURN_IF_NOT_ZERO(system(cmd.c_str())); @@ -485,8 +484,8 @@ int CurveCluster::FormatFilePool(const std::string &filePooldir, } int CurveCluster::StartSingleChunkServer( - int id, const std::string &ipPort, - const std::vector &chunkserverConf) { + int id, const std::string& ipPort, + const std::vector& chunkserverConf) { LOG(INFO) << "start chunkserver " << id << ", " << ipPort << " begin..."; std::vector split; ::curve::common::SplitString(ipPort, ":", &split); @@ -500,19 +499,20 @@ int CurveCluster::StartSingleChunkServer( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + split[0]); args.emplace_back("-chunkServerPort=" + split[1]); - for (auto &item : chunkserverConf) { + for (auto& item : 
chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! */ execv("./bazel-bin/src/chunkserver/chunkserver", argv.data()); ClearArgv(argv); @@ -530,7 +530,7 @@ int CurveCluster::StartSingleChunkServer( } int CurveCluster::StartSingleChunkServerInBackground( - int id, const std::vector &chunkserverConf) { + int id, const std::vector& chunkserverConf) { std::vector ipPort; ::curve::common::SplitString(ChunkServerIpPortInBackground(id), ":", &ipPort); @@ -547,7 +547,7 @@ int CurveCluster::StartSingleChunkServerInBackground( LOG(ERROR) << "start chunkserver " << id << " fork failed"; return -1; } else if (0 == pid) { - // 在子进程中起一个chunkserver + // Starting a chunkserver in a child process std::vector args; args.emplace_back("netns"); args.emplace_back("exec"); @@ -555,13 +555,14 @@ int CurveCluster::StartSingleChunkServerInBackground( args.emplace_back("./bazel-bin/src/chunkserver/chunkserver"); args.emplace_back("-chunkServerIp=" + ipPort[0]); args.emplace_back("-chunkServerPort=" + ipPort[1]); - for (auto &item : chunkserverConf) { + for (auto& item : chunkserverConf) { args.emplace_back(item); } - std::vector argv = VecStr2VecChar(args); + std::vector argv = VecStr2VecChar(args); /** - * 重要提示!!!! - * fork后,子进程尽量不要用LOG()打印,可能死锁!!! + * Important reminder!!!! + * After forking, try not to use LOG() printing for child processes, as + * it may cause deadlock!!! 
*/ execvp("ip", argv.data()); ClearArgv(argv); @@ -723,7 +724,7 @@ std::string CurveCluster::ChunkServerIpPortInBackground(int id) { } int CurveCluster::PreparePhysicalPool(int mdsId, - const std::string &clusterMap) { + const std::string& clusterMap) { LOG(INFO) << "create physicalpool begin..."; std::string createPPCmd = std::string("./bazel-bin/tools/curvefsTool") + @@ -741,15 +742,14 @@ int CurveCluster::PreparePhysicalPool(int mdsId, return 0; } -int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { +int CurveCluster::PrepareLogicalPool(int mdsId, const std::string& clusterMap) { LOG(INFO) << "create logicalpool begin..."; - std::string createLPCmd = - std::string("./bazel-bin/tools/curvefsTool") + - std::string(" -cluster_map=") + clusterMap + - std::string(" -mds_addr=") + MDSIpPort(mdsId) + - std::string(" -op=create_logicalpool") + - std::string(" -stderrthreshold=0 -minloglevel=0"); + std::string createLPCmd = std::string("./bazel-bin/tools/curvefsTool") + + std::string(" -cluster_map=") + clusterMap + + std::string(" -mds_addr=") + MDSIpPort(mdsId) + + std::string(" -op=create_logicalpool") + + std::string(" -stderrthreshold=0 -minloglevel=0"); LOG(INFO) << "exec cmd: " << createLPCmd; RETURN_IF_NOT_ZERO(system(createLPCmd.c_str())); @@ -758,7 +758,7 @@ int CurveCluster::PrepareLogicalPool(int mdsId, const std::string &clusterMap) { return 0; } -bool CurveCluster::CurrentServiceMDS(int *curId) { +bool CurveCluster::CurrentServiceMDS(int* curId) { for (auto mdsId : mdsPidMap_) { if (0 == ProbePort(mdsIpPort_[mdsId.first], 20000, true)) { *curId = mdsId.first; @@ -772,8 +772,8 @@ bool CurveCluster::CurrentServiceMDS(int *curId) { return false; } -int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize, +int CurveCluster::CreateFile(const std::string& user, const std::string& pwd, + const std::string& fileName, uint64_t fileSize, bool normalFile, const 
std::string& poolset) { LOG(INFO) << "create file: " << fileName << ", size: " << fileSize << " begin..."; @@ -785,13 +785,12 @@ int CurveCluster::CreateFile(const std::string &user, const std::string &pwd, context.length = fileSize; context.poolset = poolset; - RETURN_IF_NOT_ZERO( - mdsClient_->CreateFile(context)); + RETURN_IF_NOT_ZERO(mdsClient_->CreateFile(context)); LOG(INFO) << "success create file"; return 0; } -int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, +int CurveCluster::ProbePort(const std::string& ipPort, int64_t timeoutMs, bool expectOpen) { int socket_fd = socket(AF_INET, SOCK_STREAM, 0); if (-1 == socket_fd) { @@ -819,7 +818,7 @@ int CurveCluster::ProbePort(const std::string &ipPort, int64_t timeoutMs, uint64_t start = ::curve::common::TimeUtility::GetTimeofDayMs(); while (::curve::common::TimeUtility::GetTimeofDayMs() - start < timeoutMs) { int connectRes = - connect(socket_fd, (struct sockaddr *)&addr, sizeof(addr)); + connect(socket_fd, (struct sockaddr*)&addr, sizeof(addr)); if (expectOpen && connectRes == 0) { LOG(INFO) << "probe " << ipPort << " success."; close(socket_fd); diff --git a/test/integration/cluster_common/cluster.h b/test/integration/cluster_common/cluster.h index e5b04d30c7..4418f65ede 100644 --- a/test/integration/cluster_common/cluster.h +++ b/test/integration/cluster_common/cluster.h @@ -23,215 +23,222 @@ #ifndef TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ #define TEST_INTEGRATION_CLUSTER_COMMON_CLUSTER_H_ -#include #include -#include #include -#include "src/client/mds_client.h" +#include +#include + #include "src/client/config_info.h" -#include "test/util/config_generator.h" +#include "src/client/mds_client.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store_etcd.h" +#include "test/util/config_generator.h" -using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd; using ::curve::client::MDSClient; +using ::curve::snapshotcloneserver::SnapshotCloneMetaStoreEtcd; namespace 
curve { -#define RETURN_IF_NOT_ZERO(x) \ - do { \ - int ret = (x); \ - if (ret != 0) { \ - LOG(ERROR) << __FILE__ << ":" << __LINE__ \ - << "-> get non-ZERO, return -1"; \ - return ret; \ - } \ +#define RETURN_IF_NOT_ZERO(x) \ + do { \ + int ret = (x); \ + if (ret != 0) { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ \ + << "-> get non-ZERO, return -1"; \ + return ret; \ + } \ } while (0) -#define RETURN_IF_FALSE(x) \ - do { \ - bool ret = (x); \ - if (!ret) { \ - LOG(ERROR) << __FILE__ << ":" << __LINE__ \ - << "-> get FALSE, return -1"; \ - return -1; \ - } \ +#define RETURN_IF_FALSE(x) \ + do { \ + bool ret = (x); \ + if (!ret) { \ + LOG(ERROR) << __FILE__ << ":" << __LINE__ \ + << "-> get FALSE, return -1"; \ + return -1; \ + } \ } while (0) class CurveCluster { public: /** - * CurveCluster 构造函数 + * @brief CurveCluster constructor * - * @param[in] netWorkSegment 网桥的网络地址,默认为"192.168.200." - * @param[in] nsPrefix 网络命名空间的前缀,默认为"integ_" + * @param[in] netWorkSegment: The network address of the bridge, which + * defaults to "192.168.200." 
+ * @param[in] nsPrefix: The prefix of the network namespace, which defaults + * to "integ_" */ - CurveCluster(const std::string &netWorkSegment = "192.168.200.", - const std::string &nsPrefix = "integ_") + CurveCluster(const std::string& netWorkSegment = "192.168.200.", + const std::string& nsPrefix = "integ_") : networkSegment_(netWorkSegment), nsPrefix_(nsPrefix) {} /** - * InitMdsClient 初始化mdsclient, 用于和mds交互 + * @brief InitMdsClient, initializes mdsclient for interaction with mds * - * @param op 参数设置 - * @return 0.成功; 非0.失败 + * @param op: parameter setting + * @return 0 Success; Non 0 Failure */ - int InitMdsClient(const curve::client::MetaServerOption &op); - + int InitMdsClient(const curve::client::MetaServerOption& op); /** - * @brief 初始化metastore + * @brief Initialize metastore * - * @param[in] etcdEndpoints etcd client的ip port + * @param[in] etcdEndpoints: etcd client's IP port * - * @return 返回错误码 + * @return returns an error code */ - int InitSnapshotCloneMetaStoreEtcd( - const std::string &etcdEndpoints); + int InitSnapshotCloneMetaStoreEtcd(const std::string& etcdEndpoints); /** - * BuildNetWork 如果需要是用不同的ip来起chunkserver, - * 需要在测试用例的SetUp中先 调用该函数 - * @return 0.成功; 非0.失败 + * @brief BuildNetWork, If BuildNet needs to use a different IP to start the + * chunkserver, This function needs to be called first in the SetUp of the + * test case + * + * @return 0 Success; Non 0 Failure */ int BuildNetWork(); /** - * StopCluster 停止该集群中所有的进程 - * @return 0.成功; -1.失败 + * @brief StopCluster, stops all processes in the cluster + * + * @return 0.Success; -1.Failure */ int StopCluster(); /** - * @brief 生成各模块配置文件 + * @brief Generate configuration files for each module * - * @tparam T 任一ConfigGenerator - * @param configPath 配置文件路径 - * @param options 修改的配置项 + * @tparam T: any ConfigGenerator + * @param configPath: Configuration file path + * @param options: Configuration items modified */ - template - void PrepareConfig(const std::string &configPath, - const std::vector 
&options) { + template + void PrepareConfig(const std::string& configPath, + const std::vector& options) { T gentor(configPath); gentor.SetConfigOptions(options); gentor.Generate(); } /** - * StartSingleMDS 启动一个mds - * 如果需要不同ip的chunkserver,ipPort请设置为192.168.200.1:XXXX + * @brief StartSingleMDS starts an mds + * If need chunkservers with different IPs, please set the ipPort to + * 192.168.200.1:XXXX * - * @param[in] id mdsId - * @param[in] ipPort 指定mds的ipPort - * @param[in] mdsConf mds启动参数项, 示例: + * @param[in] id: mdsId + * @param[in] ipPort: specifies the ipPort of the mds + * @param[in] mdsConf: mds startup parameter item, example: * const std::vector mdsConf{ {"--graceful_quit_on_sigterm"}, {"--confPath=./test/integration/cluster_common/mds.basic.conf"}, }; - * @param[in] expectLeader 是否预期是leader - * @return 成功则返回pid; 失败则返回-1 + * @param[in] expectLeader: is the expected leader expected + * @return success returns pid; Failure returns -1 */ - int StartSingleMDS(int id, const std::string &ipPort, int dummyPort, - const std::vector &mdsConf, + int StartSingleMDS(int id, const std::string& ipPort, int dummyPort, + const std::vector& mdsConf, bool expectLeader); /** - * StopMDS 停止指定id的mds - * @return 0.成功; -1.失败 + * @brief StopMDS, stops the specified id's mds + * @return 0.Success; -1.Failure */ int StopMDS(int id); /** - * StopAllMDS 停止所有mds - * @return 0.成功; -1.失败 + * @brief StopAllMDS, stops all mds + * @return 0.Success; -1.Failure */ int StopAllMDS(); /** - * @brief 启动一个snapshotcloneserver + * @brief Start a snapshotcloneserver * - * @param id snapshotcloneserver 的Id - * @param ipPort ip端口 - * @param snapshotcloneConf 参数项 - * @return 成功则返回pid; 失败则返回-1 + * @param id: The ID of snapshotclone server + * @param ipPort: IP Port + * @param snapshot: clone Conf parameter item + * @return success returns pid; Failure returns -1 */ - int - StartSnapshotCloneServer(int id, const std::string &ipPort, - const std::vector &snapshotcloneConf); + int StartSnapshotCloneServer( 
+ int id, const std::string& ipPort, + const std::vector& snapshotcloneConf); /** - * @brief 停止指定Id的snapshotcloneserver + * @brief Stop the snapshotcloneserver for the specified Id * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功返回0,失败返回-1 + * @param id: The ID of the snapshotcloneserver + * @param force: Use kill -9 when it is true + * @return returns 0 for success, -1 for failure */ int StopSnapshotCloneServer(int id, bool force = false); /** - * @brief 重启指定Id的snapshotcloneserver + * @brief: Restart the snapshotcloneserver with the specified Id * - * @param id snapshotcloneserver的id - * @param force 为true时使用kill -9 - * @return 成功则返回pid; 失败则返回-1 + * @param id: The ID of the snapshotcloneserver + * @param force: Use kill -9 when it is true + * @return success returns pid; Failure returns -1 */ int RestartSnapshotCloneServer(int id, bool force = false); /** - * @brief 停止所有的snapshotcloneserver - * @return 成功返回0,失败返回-1 + * @brief Stop all snapshotcloneserver + * @return returns 0 for success, -1 for failure */ int StopAllSnapshotCloneServer(); /** - * StartSingleEtcd 启动一个etcd节点 + * @brief StartSingleEtcd starts an etcd node * * @param clientIpPort * @param peerIpPort - * @param etcdConf etcd启动项参数, 建议按照模块指定name,防止并发运行时冲突 + * @param etcdConf: etcd startup parameter, it is recommended to specify the + * name according to the module to prevent concurrent runtime conflicts * std::vector{"--name basic_test_start_stop_module1"} - * @return 成功则返回pid; 失败则返回-1 + * @return success returns pid; Failure returns -1 */ - int StartSingleEtcd(int id, const std::string &clientIpPort, - const std::string &peerIpPort, - const std::vector &etcdConf); + int StartSingleEtcd(int id, const std::string& clientIpPort, + const std::string& peerIpPort, + const std::vector& etcdConf); /** - * WaitForEtcdClusterAvalible - * 在一定时间内等待etcd集群leader选举成功,处于可用状态 + * @brief WaitForEtcdClusterAvalible + * Wait for the ETCD cluster leader election to be successful and 
available + * for a certain period of time */ bool WaitForEtcdClusterAvalible(int waitSec = 20); /** - * StopEtcd 停止指定id的etcd节点 - * @return 0.成功; -1.失败 + * @brief StopEtcd stops the etcd node with the specified id + * @return 0.Success; -1.Failure */ int StopEtcd(int id); /** - * StopAllEtcd 停止所有etcd节点 - * @return 0.成功; -1.失败 + * @brief StopAllEtcd stops all etcd nodes + * @return 0.Success; -1.Failure */ int StopAllEtcd(); /** - * @brief 格式化FilePool + * @brief FormatFilePool, Format FilePool * - * @param filePooldir FilePool目录 - * @param filePoolmetapath FilePool元数据目录 - * @param filesystemPath 文件系统目录 - * @param size FilePool size (GB) - * @return 成功返回0,失败返回-1 + * @param filePooldir: FilePool directory + * @param filePoolmetapath: FilePool metadata directory + * @param filesystemPath: file system directory + * @param size: FilePool size (GB) + * @return returns 0 for success, -1 for failure */ - int FormatFilePool(const std::string &filePooldir, - const std::string &filePoolmetapath, - const std::string &filesystemPath, uint32_t size); + int FormatFilePool(const std::string& filePooldir, + const std::string& filePoolmetapath, + const std::string& filesystemPath, uint32_t size); /** - * StartSingleChunkServer 启动一个chunkserver节点 + * @brief StartSingleChunkServer starts a chunkserver node * * @param[in] id * @param[in] ipPort - * @param[in] chunkserverConf chunkserver启动项,示例: + * @param[in] chunkserverConf chunkserver startup item, example: * const std::vector chunkserverConf1{ {"--graceful_quit_on_sigterm"}, {"-chunkServerStoreUri=local://./basic1/"}, @@ -243,209 +250,219 @@ class CurveCluster { {"-conf=./test/integration/cluster_common/chunkserver.basic.conf"}, {"-raft_sync_segments=true"}, }; - 建议文件名也按模块的缩写来,文件名不能太长,否则注册到数据库会失败 - * @return 成功则返回pid; 失败则返回-1 + It is recommended to also use the abbreviation of the module for the + file name. 
The file name should not be too long, otherwise registering + to the database will fail + * @return success returns pid; Failure returns -1 */ - int StartSingleChunkServer(int id, const std::string &ipPort, - const std::vector &chunkserverConf); + int StartSingleChunkServer(int id, const std::string& ipPort, + const std::vector& chunkserverConf); /** - * StartSingleChunkServer 在网络命名空间内启动一个指定id的chunkserver - * 无需指定ipPort + * @brief StartSingleChunkServer Starts a chunkserver with the specified id + * in the network namespace No need to specify ipPort * * @param id - * @param chunkserverConf, 同StartSingleChunkServer的示例 - * @return 成功则返回pid; 失败则返回-1 + * @param chunkserverConf: same as the example of StartSingleChunkServer + * @return success returns pid; Failure returns -1 */ int StartSingleChunkServerInBackground( - int id, const std::vector &chunkserverConf); + int id, const std::vector& chunkserverConf); /** - * StopChunkServer 停掉指定id的chunkserver进程 - * @return 0.成功; -1.失败 + * @brief StopChunkServer stops the chunkserver process with the specified + * id + * @return 0.Success; -1.Failure */ int StopChunkServer(int id); /** - * StopAllChunkServer 停止所有chunkserver - * @return 0.成功; -1.失败 + * @brief StopAllChunkServer Stop all chunkserver + * @return 0.Success; -1.Failure */ int StopAllChunkServer(); /** - * PreparePhysicalPool 创建物理池 + * @brief PreparePhysicalPool Create Physical Pool * - * @param[in] id 给指定id的mds发送命令 - * @param[in] clusterMap 拓扑信息,示例: - * ./test/integration/cluster_common/cluster_common_topo_1.txt (不同ip) + * @param[in] id: Send command to the specified mds with id + * @param[in] clusterMap: topology information, example: + * ./test/integration/cluster_common/cluster_common_topo_1.txt (different + * IPs) * ./test/integration/cluster_common/cluster_common_topo_2.txt - * (相同ip, 一定要加上port加以区分, - * chunkserver也必须和clusterMap中server的ipPort相同) - * @return 0.成功; -1.失败 + * (The same IP address must be distinguished by adding a port, + * The chunkserver must also 
be the same as the ipPort of the server in + * the clusterMap) + * @return 0.Success; -1.Failure */ - int PreparePhysicalPool(int mdsId, const std::string &clusterMap); + int PreparePhysicalPool(int mdsId, const std::string& clusterMap); /** - * @return 0.成功; -1.失败 + * @return 0.Success; -1.Failure */ - int PrepareLogicalPool(int mdsId, const std::string &clusterMap); + int PrepareLogicalPool(int mdsId, const std::string& clusterMap); /** - * MDSIpPort 获取指定id的mds地址 + * MDSIpPort retrieves the mds address of the specified id */ std::string MDSIpPort(int id); /** - * EtcdClientIpPort 获取指定id的etcd client地址 + * EtcdClientIpPort retrieves the etcd client address for the specified id */ std::string EtcdClientIpPort(int id); /** - * EtcdPeersIpPort 获取指定id的etcd peers地址 + * EtcdPeersIpPort retrieves the etcd Peers address of the specified id */ std::string EtcdPeersIpPort(int id); /** - * ChunkServerIpPort 获取指定id的chunkserver地址 + * ChunkServerIpPort retrieves the chunkserver address for the specified id */ std::string ChunkServerIpPort(int id); /** - * HangMDS hang住指定mds进程 - * @return 0.成功; -1.失败 + * @brief HangMDS hang resides in the specified mds process + * @return 0.Success; -1.Failure */ int HangMDS(int id); /** - * RecoverHangMDS 恢复hang住的mds进程 - * @return 0.成功; -1.失败 + * @brief RecoverHangMDS restores the mds process where hang resides + * @return 0.Success; -1.Failure */ int RecoverHangMDS(int id); /** - * HangEtcd hang住指定etcd进程 - * @return 0.成功; -1.失败 + * @brief HangEtcd hang lives in the specified etcd process + * @return 0.Success; -1.Failure */ int HangEtcd(int id); /** - * RecoverHangEtcd 恢复hang住的mds进程 - * @return 0.成功; -1.失败 + * @brief RecoverHangEtcd recovers the mds process where hang resides + * @return 0.Success; -1.Failure */ int RecoverHangEtcd(int id); /** - * HangChunkServer hang住指定chunkserver进程 - * @return 0.成功; -1.失败 + * @brief HangChunkServer hang resides in the specified chunkserver process + * @return 0.Success; -1.Failure */ int HangChunkServer(int 
id); /** - * RecoverHangChunkServer 恢复hang住的chunkserver进程 - * @return 0.成功; -1.失败 + * @brief RecoverHangChunkServer recovers the hung chunkserver + * process + * @return 0.Success; -1.Failure */ int RecoverHangChunkServer(int id); /** - * CurrentServiceMDS 获取当前正在提供服务的mds + * CurrentServiceMDS obtains the mds that are currently providing services * - * @param[out] curId 当前正在服务的mds编号 + * @param[out] curId: the ID of the currently serving mds * - * @return true表示有正在服务的mds, false表示没有正在服务的mds + * @return true indicates that there are serving mds, while false indicates + * that there are no serving mds */ - bool CurrentServiceMDS(int *curId); + bool CurrentServiceMDS(int* curId); /** - * CreateFile 在curve中创建文件 + * @brief CreateFile creates a file in Curve. * - * @param[in] user 用户 - * @param[in] pwd 密码 - * @param[in] fileName 文件名 - * @param[in] fileSize 文件大小 - * @param[in] normalFile 是否为normal file - * @return 0.成功; -1.失败 - */ - int CreateFile(const std::string &user, const std::string &pwd, - const std::string &fileName, uint64_t fileSize = 0, + * @param[in] user: User + * @param[in] pwd: Password + * @param[in] fileName: File name + * @param[in] fileSize: File size + * @param[in] normalFile: Whether it is a normal file + * @return 0. Success; -1. Failure + */ + int CreateFile(const std::string& user, const std::string& pwd, + const std::string& fileName, uint64_t fileSize = 0, bool normalFile = true, const std::string& poolset = ""); private: /** - * ProbePort 探测指定ipPort是否处于监听状态 + * @brief ProbePort checks if the specified ipPort is in a listening state. * - * @param[in] ipPort 指定的ipPort值 - * @param[in] timeoutMs 探测的超时时间,单位是ms - * @param[in] expectOpen 是否希望是监听状态 + * @param[in] ipPort: The specified ipPort value. + * @param[in] timeoutMs: The timeout for probing in milliseconds. + * @param[in] expectOpen: Whether it is expected to be in a listening state. * - * @return 0表示指定时间内的探测符合预期. 
-1表示指定时间内的探测不符合预期 + * @return 0 indicates that the probing meets the expected condition within + * the specified time. -1 indicates that the probing does not meet the + * expected condition within the specified time. */ - int ProbePort(const std::string &ipPort, int64_t timeoutMs, + int ProbePort(const std::string& ipPort, int64_t timeoutMs, bool expectOpen); /** - * ChunkServerIpPortInBackground - * 在需要不同ip的chunkserver的情况下,用于生成chunkserver ipPort + * @brief ChunkServerIpPortInBackground + * Used to generate chunkserver ipPort when chunkservers with different + * IPs are required */ std::string ChunkServerIpPortInBackground(int id); /** - * HangProcess hang住一个进程 + * @brief HangProcess hangs a process * - * @param pid 进程id - * @return 0.成功; -1.失败 + * @param pid: process id + * @return 0.Success; -1.Failure */ int HangProcess(pid_t pid); /** - * RecoverHangProcess 恢复hang住的进程 + * @brief RecoverHangProcess recovers a hung process. * - * @param pid 进程id - * @return 0.成功; -1.失败 + * @param pid: Process ID + * @return 0. Success; -1. 
Failure */ int RecoverHangProcess(pid_t pid); private: - // 网络号 + // Network number std::string networkSegment_; - // 网络命名空间前缀 + // Network namespace prefix std::string nsPrefix_; - // mds的id对应的进程号 + // The process number corresponding to the ID of the mds std::map mdsPidMap_; - // mds的id对应的ipport + // The ipport corresponding to the ID of the mds std::map mdsIpPort_; - // snapshotcloneserver id对应的pid + // The pid corresponding to the snapshotcloneserver id std::map snapPidMap_; - // snapshotcloneserver id对应的ipPort + // The ipPort corresponding to the snapshotcloneserver ID std::map snapIpPort_; - // snapshotcloneserver id对应的conf + // Conf corresponding to snapshotcloneserver id std::map> snapConf_; - // etcd的id对应的进程号 + // The process number corresponding to the id of ETCD std::map etcdPidMap_; - // etcd的id对应的client ipport + // The client ipport corresponding to the id of ETCD std::map etcdClientIpPort_; - // etcd的id对应的peer ipport + // Peer ipport corresponding to the id of ETCD std::map etcdPeersIpPort_; - // chunkserver的id对应的进程号 + // The process number corresponding to the id of chunkserver std::map chunkserverPidMap_; - // chunkserver的id对应的ipport + // The IP port corresponding to the ID of the chunkserver std::map chunkserverIpPort_; // mdsClient std::shared_ptr mdsClient_; public: - // SnapshotCloneMetaStore用于测试过程中灌数据 + // SnapshotCloneMetaStore for filling data during testing std::shared_ptr metaStore_; }; } // namespace curve diff --git a/test/integration/common/chunkservice_op.h b/test/integration/common/chunkservice_op.h index 28f32c6891..58322ea380 100644 --- a/test/integration/common/chunkservice_op.h +++ b/test/integration/common/chunkservice_op.h @@ -24,9 +24,11 @@ #define TEST_INTEGRATION_COMMON_CHUNKSERVICE_OP_H_ #include -#include -#include + #include +#include +#include + #include "include/chunkserver/chunkserver_common.h" #include "proto/common.pb.h" @@ -40,7 +42,7 @@ using std::string; #define NULL_SN -1 struct ChunkServiceOpConf { - Peer 
*leaderPeer; + Peer* leaderPeer; LogicPoolID logicPoolId; CopysetID copysetId; uint32_t rpcTimeout; @@ -49,221 +51,247 @@ struct ChunkServiceOpConf { class ChunkServiceOp { public: /** - * @brief 通过chunkService写chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Write a chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param data 待写数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data: The data to be written + * @param cloneFileSource: The file path of the clone source + * @param cloneFileOffset: Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int WriteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int WriteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, size_t len, - const char *data, + const char* data, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn: Chunk version * @param offset * @param len - * @param data 读取内容 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data: The reading content + * @param cloneFileSource: The file path of the clone source + * @param cloneFileOffset: Relative offset of clone chunk in clone source + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int 
ReadChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, - SequenceNum sn, off_t offset, size_t len, - string *data, + static int ReadChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, + SequenceNum sn, off_t offset, size_t len, string* data, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 通过chunkService读chunk快照 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Read chunk snapshot through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param data 读取内容 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param data: The reading content + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int ReadChunkSnapshot(struct ChunkServiceOpConf *opConf, + static int ReadChunkSnapshot(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, std::string *data); + size_t len, std::string* data); /** - * @brief 通过chunkService删除chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Delete chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param sn chunk版本 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param sn: chunk version + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int DeleteChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int DeleteChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum sn); /** - * @brief 通过chunkService删除此次转储时产生的或者历史遗留的快照 - * 如果转储过程中没有产生快照,则修改chunk的correctedSn - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief: Delete the snapshot generated during this dump or historical + * legacy through chunkService If no snapshot is generated during + * the dump process, modify 
the correctedSn of the chunk + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId * @param correctedSn - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf *opConf, + static int DeleteChunkSnapshotOrCorrectSn(struct ChunkServiceOpConf* opConf, ChunkID chunkId, SequenceNum correctedSn); /** - * @brief 通过chunkService创建clone chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Create a clone chunk through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param location 源chunk在源端的位置,可能在curve或S3上 + * @param location: The location of the source chunk on the source side, + * possibly on curve or S3 * @param correctedSn * @param sn * @param chunkSize - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int CreateCloneChunk(struct ChunkServiceOpConf *opConf, - ChunkID chunkId, const std::string &location, + static int CreateCloneChunk(struct ChunkServiceOpConf* opConf, + ChunkID chunkId, const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize); /** - * @brief 通过chunkService恢复chunk - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief Restore Chunk through ChunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId * @param offset * @param len - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int RecoverChunk(struct ChunkServiceOpConf *opConf, ChunkID chunkId, + static int RecoverChunk(struct ChunkServiceOpConf* opConf, ChunkID chunkId, off_t offset, size_t len); /** - * @brief 
通过chunkService获取chunk元数据 - * @param opConf,leaderPeer/copysetid等公共配置参数 + * @brief: Obtain chunk metadata through chunkService + * @param opConf: Common configuration parameters such as, + * leaderPeer/copyset, etc * @param chunkId - * @param curSn 返回当前chunk版本 - * @param snapSn 返回快照chunk版本 - * @param redirectedLeader 返回重定向主节点 - * @return 请求执行失败则返回-1,否则返回错误码 + * @param curSn: returns the current chunk version + * @param snapSn: returns the snapshot chunk version + * @param redirectedLeader returns the redirected master node + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ - static int GetChunkInfo(struct ChunkServiceOpConf *opConf, ChunkID chunkId, - SequenceNum *curSn, SequenceNum *snapSn, - string *redirectedLeader); + static int GetChunkInfo(struct ChunkServiceOpConf* opConf, ChunkID chunkId, + SequenceNum* curSn, SequenceNum* snapSn, + string* redirectedLeader); }; class ChunkServiceVerify { public: - explicit ChunkServiceVerify(struct ChunkServiceOpConf *opConf) + explicit ChunkServiceVerify(struct ChunkServiceOpConf* opConf) : opConf_(opConf) {} /** - * @brief 执行写chunk, 并将数据写入到chunkdata对应区域,以便于后续验证数据。 + * @brief Executes the write chunk and writes the data to the corresponding + * area of chunkdata for subsequent data validation. 
* @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param data 待写数据 - * @param chunkData 整个chunk的预期数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 返回写操作的错误码 + * @param data: the data to be written + * @param chunkData: Expected data for the entire chunk + * @param cloneFileSource: The file path of the clone source + * @param cloneFileOffset: Relative offset of clone chunk in clone source + * @return returns the error code for the write operation */ int VerifyWriteChunk(ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, const char *data, string *chunkData, + size_t len, const char* data, string* chunkData, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 执行读chunk, 并验证读取内容是否与chunkdata对应区域的预期数据吻合。 + * @brief Executes the read chunk and verifies whether the read content + * matches the expected data in the corresponding region of the + * chunkdata. * @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param chunkData 整个chunk的预期数据 - * @param cloneFileSource clone源的文件路径 - * @param cloneFileOffset clone chunk在clone源中的相对偏移 - * @return 读请求结果符合预期返回0,否则返回-1 + * @param chunkData: Expected data for the entire chunk + * @param cloneFileSource: The file path of the clone source + * @param cloneFileOffset: Relative offset of clone chunk in clone source + * @return The read request result meets the expected return of 0, + * otherwise it returns -1 */ int VerifyReadChunk(ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, string *chunkData, + size_t len, string* chunkData, const std::string& cloneFileSource = "", off_t cloneFileOffset = 0); /** - * @brief 执行读chunk快照, - * 并验证读取内容是否与chunkdata对应区域的预期数据吻合。 + * @brief Execute read chunk snapshot, + * And verify whether the read content matches the expected data in + * the corresponding area of chunkdata. 
* @param chunkId - * @param sn chunk版本 + * @param sn: chunk version * @param offset * @param len - * @param chunkData 整个chunk的预期数据 - * @return 读请求结果符合预期返回0,否则返回-1 + * @param chunkData: Expected data for the entire chunk + * @return The read request result meets the expected return of 0, + * otherwise it returns -1 */ int VerifyReadChunkSnapshot(ChunkID chunkId, SequenceNum sn, off_t offset, - size_t len, string *chunkData); + size_t len, string* chunkData); /** - * @brief 删除chunk + * @brief delete chunk * @param chunkId - * @param sn chunk版本 - * @return 返回删除操作的错误码 + * @param sn: chunk version + * @return returns the error code for the delete operation */ int VerifyDeleteChunk(ChunkID chunkId, SequenceNum sn); /** - * @brief 删除chunk的快照 + * @brief Delete the snapshot of the chunk * @param chunkId * @param correctedSn - * @return 返回删除操作的错误码 + * @return returns the error code for the delete operation */ int VerifyDeleteChunkSnapshotOrCorrectSn(ChunkID chunkId, SequenceNum correctedSn); /** - * @brief 创建clone chunk + * @brief Create clone chunk * @param chunkId - * @param location 源地址 + * @param location: source address * @param correctedSn * @param sn * @param chunkSize - * @return 返回创建操作的错误码 + * @return returns the error code for the creation operation */ - int VerifyCreateCloneChunk(ChunkID chunkId, const std::string &location, + int VerifyCreateCloneChunk(ChunkID chunkId, const std::string& location, uint64_t correctedSn, uint64_t sn, uint64_t chunkSize); /** - * @brief 恢复chunk + * @brief Restore chunk * @param chunkId * @param offset * @param len - * @return 请求执行失败则返回-1,否则返回错误码 + * @return If the request fails to execute, -1 will be returned, otherwise + * an error code will be returned */ int VerifyRecoverChunk(ChunkID chunkId, off_t offset, size_t len); /** - * @brief 获取chunk元数据,并检验结果是否符合预期 + * @brief To obtain chunk metadata and verify if the results meet + * expectations * @param chunkId - * @param expCurSn 预期chunk版本,-1表示不存在 - * @param expSanpSn 预期快照版本,-1表示不存在 
- * @param expLeader 预期redirectedLeader - * @return 验证成功返回0,否则返回-1 + * @param expCurSn: Expected chunk version, -1 indicates non-existent + * @param expSanpSn: Expected snapshot version, -1 indicates non-existent + * @param expLeader: Expected redirectedLeader + * @return returns 0 after successful verification, otherwise returns -1 */ int VerifyGetChunkInfo(ChunkID chunkId, SequenceNum expCurSn, SequenceNum expSnapSn, string expLeader); private: - struct ChunkServiceOpConf *opConf_; - // 记录写过的chunkId(预期存在),用于判断请求的返回值是否符合预期 + struct ChunkServiceOpConf* opConf_; + // Record the chunkId (expected existence) that has been written, used to + // determine whether the return value of the request meets expectations std::set existChunks_; }; diff --git a/test/integration/heartbeat/common.cpp b/test/integration/heartbeat/common.cpp index 5d09293287..ae597506bc 100644 --- a/test/integration/heartbeat/common.cpp +++ b/test/integration/heartbeat/common.cpp @@ -21,44 +21,44 @@ */ #include "test/integration/heartbeat/common.h" + #include "test/mds/mock/mock_alloc_statistic.h" namespace curve { namespace mds { -void HeartbeatIntegrationCommon::PrepareAddPoolset( - const Poolset &poolset) { +void HeartbeatIntegrationCommon::PrepareAddPoolset(const Poolset& poolset) { int ret = topology_->AddPoolset(poolset); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } void HeartbeatIntegrationCommon::PrepareAddLogicalPool( - const LogicalPool &lpool) { + const LogicalPool& lpool) { int ret = topology_->AddLogicalPool(lpool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; } void HeartbeatIntegrationCommon::PrepareAddPhysicalPool( - const PhysicalPool &ppool) { + const PhysicalPool& ppool) { int ret = topology_->AddPhysicalPool(ppool); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret); } -void HeartbeatIntegrationCommon::PrepareAddZone(const Zone &zone) { +void HeartbeatIntegrationCommon::PrepareAddZone(const Zone& zone) { int ret = topology_->AddZone(zone); 
EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } -void HeartbeatIntegrationCommon::PrepareAddServer(const Server &server) { +void HeartbeatIntegrationCommon::PrepareAddServer(const Server& server) { int ret = topology_->AddServer(server); EXPECT_EQ(topology::kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } void HeartbeatIntegrationCommon::PrepareAddChunkServer( - const ChunkServer &chunkserver) { + const ChunkServer& chunkserver) { ChunkServer cs(chunkserver); cs.SetOnlineState(OnlineState::ONLINE); int ret = topology_->AddChunkServer(cs); @@ -68,7 +68,7 @@ void HeartbeatIntegrationCommon::PrepareAddChunkServer( void HeartbeatIntegrationCommon::PrepareAddCopySet( CopySetIdType copysetId, PoolIdType logicalPoolId, - const std::set &members) { + const std::set& members) { CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); int ret = topology_->AddCopySet(cs); @@ -78,10 +78,10 @@ void HeartbeatIntegrationCommon::PrepareAddCopySet( void HeartbeatIntegrationCommon::UpdateCopysetTopo( CopySetIdType copysetId, PoolIdType logicalPoolId, uint64_t epoch, - ChunkServerIdType leader, const std::set &members, + ChunkServerIdType leader, const std::set& members, ChunkServerIdType candidate) { ::curve::mds::topology::CopySetInfo copysetInfo; - ASSERT_TRUE(topology_->GetCopySet(CopySetKey{ logicalPoolId, copysetId }, + ASSERT_TRUE(topology_->GetCopySet(CopySetKey{logicalPoolId, copysetId}, ©setInfo)); copysetInfo.SetEpoch(epoch); copysetInfo.SetLeader(leader); @@ -93,8 +93,8 @@ void HeartbeatIntegrationCommon::UpdateCopysetTopo( } void HeartbeatIntegrationCommon::SendHeartbeat( - const ChunkServerHeartbeatRequest &request, bool expectFailed, - ChunkServerHeartbeatResponse *response) { + const ChunkServerHeartbeatRequest& request, bool expectFailed, + ChunkServerHeartbeatResponse* response) { // init brpc client brpc::Channel channel; ASSERT_EQ(0, channel.Init(listenAddr_.c_str(), NULL)); @@ -109,7 +109,7 
@@ void HeartbeatIntegrationCommon::SendHeartbeat( } void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( - ChunkServerIdType id, ChunkServerHeartbeatRequest *req) { + ChunkServerIdType id, ChunkServerHeartbeatRequest* req) { ChunkServer out; EXPECT_TRUE(topology_->GetChunkServer(id, &out)) << "get chunkserver: " << id << " fail"; @@ -139,7 +139,7 @@ void HeartbeatIntegrationCommon::BuildBasicChunkServerRequest( } void HeartbeatIntegrationCommon::AddCopySetToRequest( - ChunkServerHeartbeatRequest *req, const CopySetInfo &csInfo, + ChunkServerHeartbeatRequest* req, const CopySetInfo& csInfo, ConfigChangeType type) { auto info = req->add_copysetinfos(); info->set_logicalpoolid(csInfo.GetLogicalPoolId()); @@ -170,7 +170,7 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( << "get chunkserver: " << csInfo.GetCandidate() << " error"; std::string ipport = out.GetHostIp() + ":" + std::to_string(out.GetPort()) + ":0"; - ConfigChangeInfo *confChxInfo = new ConfigChangeInfo(); + ConfigChangeInfo* confChxInfo = new ConfigChangeInfo(); auto replica = new ::curve::common::Peer(); replica->set_address(ipport.c_str()); confChxInfo->set_allocated_peer(replica); @@ -180,13 +180,13 @@ void HeartbeatIntegrationCommon::AddCopySetToRequest( } } -void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator &op) { +void HeartbeatIntegrationCommon::AddOperatorToOpController(const Operator& op) { auto opController = coordinator_->GetOpController(); ASSERT_TRUE(opController->AddOperator(op)); } void HeartbeatIntegrationCommon::RemoveOperatorFromOpController( - const CopySetKey &id) { + const CopySetKey& id) { auto opController = coordinator_->GetOpController(); opController->RemoveOperator(id); } @@ -243,11 +243,11 @@ void HeartbeatIntegrationCommon::PrepareBasicCluseter() { PrepareAddChunkServer(cs3); // add copyset - PrepareAddCopySet(1, 1, std::set{ 1, 2, 3 }); + PrepareAddCopySet(1, 1, std::set{1, 2, 3}); } void 
HeartbeatIntegrationCommon::InitHeartbeatOption( - Configuration *conf, HeartbeatOption *heartbeatOption) { + Configuration* conf, HeartbeatOption* heartbeatOption) { heartbeatOption->heartbeatIntervalMs = conf->GetIntValue("mds.heartbeat.intervalMs"); heartbeatOption->heartbeatMissTimeOutMs = @@ -259,7 +259,7 @@ void HeartbeatIntegrationCommon::InitHeartbeatOption( } void HeartbeatIntegrationCommon::InitSchedulerOption( - Configuration *conf, ScheduleOption *scheduleOption) { + Configuration* conf, ScheduleOption* scheduleOption) { scheduleOption->enableCopysetScheduler = conf->GetBoolValue("mds.enable.copyset.scheduler"); scheduleOption->enableLeaderScheduler = @@ -305,22 +305,20 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { auto idGen = std::make_shared(); auto tokenGen = std::make_shared(); - auto topologyStorage = - std::make_shared(); + auto topologyStorage = std::make_shared(); topology_ = std::make_shared(idGen, tokenGen, topologyStorage); ASSERT_EQ(kTopoErrCodeSuccess, topology_->Init(topologyOption)); // init topology manager - topologyStat_ = - std::make_shared(topology_); + topologyStat_ = std::make_shared(topology_); topologyStat_->Init(); auto copysetManager = std::make_shared(CopysetOption()); auto allocStat = std::make_shared(); auto topologyServiceManager = std::make_shared( topology_, topologyStat_, nullptr, copysetManager, nullptr); - // 初始化basic集群 + // Initialize basic cluster PrepareBasicCluseter(); // init coordinator @@ -341,7 +339,7 @@ void HeartbeatIntegrationCommon::BuildBasicCluster() { heartbeatManager_->Init(); heartbeatManager_->Run(); - // 启动心跳rpc + // Start heartbeat rpc listenAddr_ = conf_.GetStringValue("mds.listen.addr"); heartbeatService_ = std::make_shared(heartbeatManager_); diff --git a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp index d4ccb66c65..8ed3364576 100644 --- 
a/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp +++ b/test/integration/snapshotcloneserver/snapshotcloneserver_recover_test.cpp @@ -20,20 +20,20 @@ * Author: xuchaojie */ -#include -#include #include +#include +#include #include -#include "src/common/uuid.h" -#include "src/common/location_operator.h" -#include "test/integration/cluster_common/cluster.h" #include "src/client/libcurve_file.h" -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/snapshotcloneserver/clone/clone_service_manager.h" -#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" +#include "src/common/location_operator.h" #include "src/common/snapshotclone/snapshotclone_define.h" +#include "src/common/uuid.h" +#include "src/snapshotcloneserver/clone/clone_service_manager.h" #include "src/snapshotcloneserver/common/snapshotclone_meta_store.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/integration/cluster_common/cluster.h" +#include "test/integration/snapshotcloneserver/test_snapshotcloneserver_helpler.h" using curve::CurveCluster; using curve::client::FileClient; @@ -49,27 +49,27 @@ const uint64_t chunkSize = 16ULL * 1024 * 1024; const uint64_t segmentSize = 32ULL * 1024 * 1024; const uint64_t chunkSplitSize = 8388608; -// 测试文件只写2个segment +// Write only 2 segments in the test file const uint64_t testFile1AllocSegmentNum = 2; -// 一些常数定义 -const char *cloneTempDir_ = "/clone"; -const char *mdsRootUser_ = "root"; -const char *mdsRootPassword_ = "root_password"; +// Some constant definitions +const char* cloneTempDir_ = "/clone"; +const char* mdsRootUser_ = "root"; +const char* mdsRootPassword_ = "root_password"; constexpr uint32_t kProgressTransferSnapshotDataStart = 10; -const char *kEtcdClientIpPort = "127.0.0.1:10021"; -const char *kEtcdPeerIpPort = "127.0.0.1:10022"; -const char *kMdsIpPort = "127.0.0.1:10023"; -const char *kChunkServerIpPort1 = "127.0.0.1:10024"; 
-const char *kChunkServerIpPort2 = "127.0.0.1:10025"; -const char *kChunkServerIpPort3 = "127.0.0.1:10026"; -const char *kSnapshotCloneServerIpPort = "127.0.0.1:10027"; -const char *kSnapshotCloneServerDummyServerPort = "12002"; -const char *kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; +const char* kEtcdClientIpPort = "127.0.0.1:10021"; +const char* kEtcdPeerIpPort = "127.0.0.1:10022"; +const char* kMdsIpPort = "127.0.0.1:10023"; +const char* kChunkServerIpPort1 = "127.0.0.1:10024"; +const char* kChunkServerIpPort2 = "127.0.0.1:10025"; +const char* kChunkServerIpPort3 = "127.0.0.1:10026"; +const char* kSnapshotCloneServerIpPort = "127.0.0.1:10027"; +const char* kSnapshotCloneServerDummyServerPort = "12002"; +const char* kLeaderCampaginPrefix = "snapshotcloneserverleaderlock1"; -static const char *kDefaultPoolset = "default"; +static const char* kDefaultPoolset = "default"; const int kMdsDummyPort = 10028; @@ -79,27 +79,26 @@ const std::string kMdsConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_mds.conf"; -const std::string kCSConfigPath = // NOLINT +const std::string kCSConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_chunkserver.conf"; -const std::string kCsClientConfigPath = // NOLINT +const std::string kCsClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_cs_client.conf"; -const std::string kSnapClientConfigPath = // NOLINT +const std::string kSnapClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_snap_client.conf"; -const std::string kS3ConfigPath = // NOLINT - "./test/integration/snapshotcloneserver/config/" + kTestPrefix + - "_s3.conf"; +const std::string kS3ConfigPath = // NOLINT + "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_s3.conf"; -const std::string kSCSConfigPath = // NOLINT +const std::string kSCSConfigPath = // NOLINT 
"./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_scs.conf"; -const std::string kClientConfigPath = // NOLINT +const std::string kClientConfigPath = // NOLINT "./test/integration/snapshotcloneserver/config/" + kTestPrefix + "_client.conf"; @@ -120,11 +119,11 @@ const std::vector mdsConfigOptions{ }; const std::vector mdsConf1{ - { "--graceful_quit_on_sigterm" }, + {"--graceful_quit_on_sigterm"}, std::string("--confPath=") + kMdsConfigPath, std::string("--log_dir=") + kLogPath, std::string("--segmentSize=") + std::to_string(segmentSize), - { "--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector chunkserverConfigOptions{ @@ -151,66 +150,63 @@ const std::vector snapClientConfigOptions{ const std::vector s3ConfigOptions{}; const std::vector chunkserverConf1{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "1/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "1/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "1/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "1/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "1/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "1/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "1/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "1/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "1/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "1/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "1/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { 
"--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "1/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "1/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "1/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "1/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "1/walfilepool.meta"}, }; const std::vector chunkserverConf2{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "2/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "2/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "2/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "2/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "2/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "2/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "2/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "2/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "2/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "2/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "2/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "2/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "2/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "2/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "2/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + 
"2/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "2/walfilepool.meta"}, }; const std::vector chunkserverConf3{ - { "--graceful_quit_on_sigterm" }, - { "-chunkServerStoreUri=local://./" + kTestPrefix + "3/" }, - { "-chunkServerMetaUri=local://./" + kTestPrefix + - "3/chunkserver.dat" }, // NOLINT - { "-copySetUri=local://./" + kTestPrefix + "3/copysets" }, - { "-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-recycleUri=local://./" + kTestPrefix + "3/recycler" }, - { "-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/" }, - { "-chunkFilePoolMetaPath=./" + kTestPrefix + - "3/chunkfilepool.meta" }, // NOLINT + {"--graceful_quit_on_sigterm"}, + {"-chunkServerStoreUri=local://./" + kTestPrefix + "3/"}, + {"-chunkServerMetaUri=local://./" + kTestPrefix + + "3/chunkserver.dat"}, // NOLINT + {"-copySetUri=local://./" + kTestPrefix + "3/copysets"}, + {"-raftSnapshotUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-recycleUri=local://./" + kTestPrefix + "3/recycler"}, + {"-chunkFilePoolDir=./" + kTestPrefix + "3/chunkfilepool/"}, + {"-chunkFilePoolMetaPath=./" + kTestPrefix + + "3/chunkfilepool.meta"}, // NOLINT std::string("-conf=") + kCSConfigPath, - { "-raft_sync_segments=true" }, + {"-raft_sync_segments=true"}, std::string("--log_dir=") + kLogPath, - { "--stderrthreshold=3" }, - { "-raftLogUri=curve://./" + kTestPrefix + "3/copysets" }, - { "-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/" }, - { "-walFilePoolMetaPath=./" + kTestPrefix + - "3/walfilepool.meta" }, + {"--stderrthreshold=3"}, + {"-raftLogUri=curve://./" + kTestPrefix + "3/copysets"}, + {"-walFilePoolDir=./" + kTestPrefix + "3/walfilepool/"}, + {"-walFilePoolMetaPath=./" + kTestPrefix + "3/walfilepool.meta"}, }; const std::vector snapshotcloneserverConfigOptions{ @@ -237,7 +233,7 @@ const std::vector snapshotcloneserverConfigOptions{ const std::vector snapshotcloneConf{ std::string("--conf=") + kSCSConfigPath, std::string("--log_dir=") + kLogPath, - { 
"--stderrthreshold=3" }, + {"--stderrthreshold=3"}, }; const std::vector clientConfigOptions{ @@ -246,8 +242,8 @@ const std::vector clientConfigOptions{ std::string("mds.rpcTimeoutMS=4000"), }; -const char *testFile1_ = "/RcvItUser1/file1"; -const char *testUser1_ = "RcvItUser1"; +const char* testFile1_ = "/RcvItUser1/file1"; +const char* testUser1_ = "RcvItUser1"; int testFd1_ = 0; namespace curve { @@ -262,16 +258,16 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_ = new CurveCluster(); ASSERT_NE(nullptr, cluster_); - // 初始化db + // Initialize db system(std::string("rm -rf " + kTestPrefix + ".etcd").c_str()); system(std::string("rm -rf " + kTestPrefix + "1").c_str()); system(std::string("rm -rf " + kTestPrefix + "2").c_str()); system(std::string("rm -rf " + kTestPrefix + "3").c_str()); - // 启动etcd + // Start etcd pid_t pid = cluster_->StartSingleEtcd( 1, kEtcdClientIpPort, kEtcdPeerIpPort, - std::vector{ "--name=" + kTestPrefix }); + std::vector{"--name=" + kTestPrefix}); LOG(INFO) << "etcd 1 started on " << kEtcdClientIpPort << "::" << kEtcdPeerIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); @@ -281,13 +277,13 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kMdsConfigPath, mdsConfigOptions); - // 启动一个mds + // Start an mds pid = cluster_->StartSingleMDS(1, kMdsIpPort, kMdsDummyPort, mdsConf1, true); LOG(INFO) << "mds 1 started on " << kMdsIpPort << ", pid = " << pid; ASSERT_GT(pid, 0); - // 创建物理池 + // Creating a physical pool ASSERT_EQ(0, cluster_->PreparePhysicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -296,21 +292,18 @@ class SnapshotCloneServerTest : public ::testing::Test { // format chunkfilepool and walfilepool std::vector threadpool(3); - threadpool[0] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "1/chunkfilepool/", - "./" + kTestPrefix + "1/chunkfilepool.meta", - "./" + kTestPrefix + "1/chunkfilepool/", 2); - threadpool[1] = - 
std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "2/chunkfilepool/", - "./" + kTestPrefix + "2/chunkfilepool.meta", - "./" + kTestPrefix + "2/chunkfilepool/", 2); - threadpool[2] = - std::thread(&CurveCluster::FormatFilePool, cluster_, - "./" + kTestPrefix + "3/chunkfilepool/", - "./" + kTestPrefix + "3/chunkfilepool.meta", - "./" + kTestPrefix + "3/chunkfilepool/", 2); + threadpool[0] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "1/chunkfilepool/", + "./" + kTestPrefix + "1/chunkfilepool.meta", + "./" + kTestPrefix + "1/chunkfilepool/", 2); + threadpool[1] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "2/chunkfilepool/", + "./" + kTestPrefix + "2/chunkfilepool.meta", + "./" + kTestPrefix + "2/chunkfilepool/", 2); + threadpool[2] = std::thread(&CurveCluster::FormatFilePool, cluster_, + "./" + kTestPrefix + "3/chunkfilepool/", + "./" + kTestPrefix + "3/chunkfilepool.meta", + "./" + kTestPrefix + "3/chunkfilepool/", 2); for (int i = 0; i < 3; i++) { threadpool[i].join(); } @@ -324,7 +317,7 @@ class SnapshotCloneServerTest : public ::testing::Test { cluster_->PrepareConfig(kCSConfigPath, chunkserverConfigOptions); - // 创建chunkserver + // Create chunkserver pid = cluster_->StartSingleChunkServer(1, kChunkServerIpPort1, chunkserverConf1); LOG(INFO) << "chunkserver 1 started on " << kChunkServerIpPort1 @@ -343,7 +336,8 @@ class SnapshotCloneServerTest : public ::testing::Test { std::this_thread::sleep_for(std::chrono::seconds(5)); - // 创建逻辑池, 并睡眠一段时间让底层copyset先选主 + // Create a logical pool and sleep for a period of time to let the + // underlying copyset select the primary first ASSERT_EQ(0, cluster_->PrepareLogicalPool( 1, "./test/integration/snapshotcloneserver/" @@ -381,9 +375,9 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(INFO) << "Write testFile1_ success."; } - static bool CreateAndWriteFile(const std::string &fileName, - const std::string &user, - const 
std::string &dataSample, int *fdOut) { + static bool CreateAndWriteFile(const std::string& fileName, + const std::string& user, + const std::string& dataSample, int* fdOut) { UserInfo_t userinfo; userinfo.owner = user; int ret = fileClient_->Create(fileName, userinfo, testFile1Length); @@ -394,8 +388,8 @@ class SnapshotCloneServerTest : public ::testing::Test { return WriteFile(fileName, user, dataSample, fdOut); } - static bool WriteFile(const std::string &fileName, const std::string &user, - const std::string &dataSample, int *fdOut) { + static bool WriteFile(const std::string& fileName, const std::string& user, + const std::string& dataSample, int* fdOut) { int ret = 0; UserInfo_t userinfo; userinfo.owner = user; @@ -404,7 +398,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LOG(ERROR) << "Open fail, ret = " << *fdOut; return false; } - // 2个segment,每个写第一个chunk + // 2 segments, each with the first chunk written for (uint64_t i = 0; i < testFile1AllocSegmentNum; i++) { ret = fileClient_->Write(*fdOut, dataSample.c_str(), i * segmentSize, dataSample.size()); @@ -421,14 +415,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return true; } - static bool CheckFileData(const std::string &fileName, - const std::string &user, - const std::string &dataSample) { + static bool CheckFileData(const std::string& fileName, + const std::string& user, + const std::string& dataSample) { UserInfo_t userinfo; userinfo.owner = user; int ret = 0; - // 检查文件状态 + // Check file status FInfo fileInfo; ret = snapClient_->GetFileInfo(fileName, userinfo, &fileInfo); if (ret < 0) { @@ -490,7 +484,7 @@ class SnapshotCloneServerTest : public ::testing::Test { void TearDown() {} - void PrepareSnapshotForTestFile1(std::string *uuid1) { + void PrepareSnapshotForTestFile1(std::string* uuid1) { if (!hasSnapshotForTestFile1_) { int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", uuid1); ASSERT_EQ(0, ret); @@ -509,23 +503,23 @@ class SnapshotCloneServerTest : public 
::testing::Test { } } - int PrepareCreateCloneFile(const std::string &fileName, FInfo *fInfoOut, + int PrepareCreateCloneFile(const std::string& fileName, FInfo* fInfoOut, bool IsRecover = false) { uint64_t seqNum = 1; if (IsRecover) { - seqNum = 2; // 恢复新文件使用版本号+1 + seqNum = 2; // Restore new files using version number+1 } else { - seqNum = 1; // 克隆新文件使用初始版本号1 + seqNum = 1; // Clone new files using initial version number 1 } int ret = snapClient_->CreateCloneFile( - testFile1_, fileName, - UserInfo_t(mdsRootUser_, mdsRootPassword_), testFile1Length, - seqNum, chunkSize, 0, 0, kDefaultPoolset, fInfoOut); + testFile1_, fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_), + testFile1Length, seqNum, chunkSize, 0, 0, kDefaultPoolset, + fInfoOut); return ret; } - int PrepareCreateCloneMeta(FInfo *fInfoOut, const std::string &newFileName, - std::vector *segInfoOutVec) { + int PrepareCreateCloneMeta(FInfo* fInfoOut, const std::string& newFileName, + std::vector* segInfoOutVec) { fInfoOut->fullPathName = newFileName; fInfoOut->userinfo = UserInfo_t(mdsRootUser_, mdsRootPassword_); for (int i = 0; i < testFile1AllocSegmentNum; i++) { @@ -540,7 +534,7 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCreateCloneChunk(const std::vector &segInfoVec, + int PrepareCreateCloneChunk(const std::vector& segInfoVec, bool IsRecover = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -555,13 +549,14 @@ class SnapshotCloneServerTest : public ::testing::Test { name.chunkIndex_ = i * segmentSize / chunkSize; std::string location = LocationOperator::GenerateS3Location(name.ToDataChunkKey()); - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Create第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Create全部的chunk。 + // Due to the fact that each segment in the test file only + // writes the first chunk, Snapshots can only dump the currently + // written chunks, So 
cloning each segment from the snapshot + // only creates the first chunk. And when cloning from a file, + // because mds doesn't know if chunk has been written or not, So + // we need to create all chunks. ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -571,8 +566,10 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", seqNum = " << 1 << ", csn = " << 2; int ret = snapClient_->CreateCloneChunk( location, cidInfo, - 1, // 恢复使用快照中chunk的版本号 - 2, // 恢复使用新文件的版本号, 即原文件版本号+1 + 1, // Restore the version number of the chunk in the + // snapshot + 2, // Restore the version number of the new file, which is + // the original file version number+1 chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; @@ -585,7 +582,7 @@ class SnapshotCloneServerTest : public ::testing::Test { LocationOperator::GenerateCurveLocation( testFile1_, i * segmentSize + j * chunkSize); ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); LOG(INFO) << "CreateCloneChunk, location = " << location @@ -593,11 +590,11 @@ class SnapshotCloneServerTest : public ::testing::Test { << ", copysetId = " << cidInfo.cpid_ << ", chunkId = " << cidInfo.cid_ << ", seqNum = " << 1 << ", csn = " << 0; - int ret = - snapClient_->CreateCloneChunk(location, cidInfo, - 1, // 克隆使用初始版本号1 - 0, // 克隆使用0 - chunkSize, cb); + int ret = snapClient_->CreateCloneChunk( + location, cidInfo, + 1, // Clone using initial version number 1 + 0, // Clone using 0 + chunkSize, cb); if (ret != LIBCURVE_ERROR::OK) { return ret; } @@ -614,14 +611,14 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneMeta(const std::string &uuid) { + int PrepareCompleteCloneMeta(const 
std::string& uuid) { std::string fileName = std::string(cloneTempDir_) + "/" + uuid; int ret = snapClient_->CompleteCloneMeta( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); return ret; } - int PrepareRecoverChunk(const std::vector &segInfoVec, + int PrepareRecoverChunk(const std::vector& segInfoVec, bool IsSnapshot = false) { if (segInfoVec.size() != testFile1AllocSegmentNum) { LOG(ERROR) << "internal error!"; @@ -630,14 +627,15 @@ class SnapshotCloneServerTest : public ::testing::Test { auto tracker = std::make_shared(); if (IsSnapshot) { for (int i = 0; i < testFile1AllocSegmentNum; i++) { - // 由于测试文件每个segment只写了第一个chunk, - // 快照可以做到只转储当前写过的chunk, - // 所以从快照克隆每个segment只Recover第一个chunk。 - // 而从文件克隆,由于mds不知道chunk写没写过, - // 所以需要Recover全部的chunk。 + // Due to the fact that each segment in the test file only + // writes the first chunk, Snapshots can only dump the currently + // written chunks, So clone each segment from the snapshot and + // only recover the first chunk. And when cloning from a file, + // because mds doesn't know if chunk has been written or not, So + // we need to recover all chunks. 
ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[0]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -658,7 +656,7 @@ class SnapshotCloneServerTest : public ::testing::Test { for (uint64_t j = 0; j < segmentSize / chunkSize; j++) { ChunkIDInfo cidInfo = segInfoVec[i].chunkvec[j]; for (uint64_t k = 0; k < chunkSize / chunkSplitSize; k++) { - SnapCloneCommonClosure *cb = + SnapCloneCommonClosure* cb = new SnapCloneCommonClosure(tracker); tracker->AddOneTrace(); uint64_t offset = k * chunkSplitSize; @@ -686,44 +684,42 @@ class SnapshotCloneServerTest : public ::testing::Test { return LIBCURVE_ERROR::OK; } - int PrepareCompleteCloneFile(const std::string &fileName) { + int PrepareCompleteCloneFile(const std::string& fileName) { return snapClient_->CompleteCloneFile( fileName, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } - int PrepareChangeOwner(const std::string &fileName) { + int PrepareChangeOwner(const std::string& fileName) { return fileClient_->ChangeOwner( fileName, testUser1_, UserInfo_t(mdsRootUser_, mdsRootPassword_)); } int PrepareRenameCloneFile(uint64_t originId, uint64_t destinationId, - const std::string &fileName, - const std::string &newFileName) { + const std::string& fileName, + const std::string& newFileName) { return snapClient_->RenameCloneFile( UserInfo_t(mdsRootUser_, mdsRootPassword_), originId, destinationId, fileName, newFileName); } - static CurveCluster *cluster_; - static FileClient *fileClient_; - static SnapshotClient *snapClient_; + static CurveCluster* cluster_; + static FileClient* fileClient_; + static SnapshotClient* snapClient_; bool hasSnapshotForTestFile1_ = false; std::string snapIdForTestFile1_; }; -CurveCluster *SnapshotCloneServerTest::cluster_ = nullptr; -FileClient *SnapshotCloneServerTest::fileClient_ = nullptr; -SnapshotClient 
*SnapshotCloneServerTest::snapClient_ = nullptr; +CurveCluster* SnapshotCloneServerTest::cluster_ = nullptr; +FileClient* SnapshotCloneServerTest::fileClient_ = nullptr; +SnapshotClient* SnapshotCloneServerTest::snapClient_ = nullptr; -// 未在curve中创建快照阶段,重启恢复 +// Failed to create snapshot phase in curve, restart recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -740,19 +736,18 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenNotCreateSnapOnCurvefs) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,但成功结果未返回,重启恢复 +// A snapshot has been created in the curve, but the successful result has not +// been returned. 
Restart for recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsNotReturn) { - // 调用client接口创建快照 + // Calling the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); - SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, - "snapxxx", 0, chunkSize, - segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", 0, + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -769,18 +764,18 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照,结果已返回,重启恢复 +// A snapshot has been created in the curve, and the results have been returned. +// Restart to recover TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasCreateSnapOnCurvefsReturn) { - // 调用client接口创建快照 + // Calling the client interface to create a snapshot uint64_t seq = 0; snapClient_->CreateSnapShot(testFile1_, UserInfo_t(testUser1_, ""), &seq); std::string uuid1 = UUIDGenerator().GenerateUUID(); SnapshotInfo snapInfo(uuid1, testUser1_, testFile1_, "snapxxx", seq, - chunkSize, segmentSize, testFile1Length, - 0, 0, kDefaultPoolset, 0, - Status::pending); + chunkSize, segmentSize, testFile1Length, 0, 0, + kDefaultPoolset, 0, Status::pending); cluster_->metaStore_->AddSnapshot(snapInfo); pid_t pid = cluster_->RestartSnapshotCloneServer(1); @@ -797,7 +792,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// 已在curve中创建快照阶段,nos上传部分快照,重启恢复 +// The snapshot phase has been created in the curve. 
NOS uploads some snapshots +// and restarts for recovery TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { std::string uuid1; int ret = MakeSnapshot(testUser1_, testFile1_, "snap1", &uuid1); @@ -812,7 +808,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { } if (info1.GetSnapshotInfo().GetStatus() == Status::pending) { if (info1.GetSnapProgress() > kProgressTransferSnapshotDataStart) { - // 当进度到达转储的百分比时重启 + // Restart when the progress reaches the percentage of the dump pid_t pid = cluster_->RestartSnapshotCloneServer(1, true); LOG(INFO) << "SnapshotCloneServer 1 restarted, pid = " << pid; ASSERT_GT(pid, 0); @@ -836,16 +832,14 @@ TEST_F(SnapshotCloneServerTest, TestRecoverSnapshotWhenHasTransferSomeData) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上未创建文件 +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -860,7 +854,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but +// not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasCreateCloneFileSuccessNotReturn) { std::string uuid1 = 
UUIDGenerator().GenerateUUID(); @@ -870,12 +865,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneHasCreateCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, 0, 0, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, 0, 0, 0, CloneFileType::kFile, + false, CloneStep::kCreateCloneFile, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -890,7 +883,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -898,12 +891,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -918,7 +909,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the 
CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -932,12 +924,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -952,7 +942,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -964,12 +955,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCreateCloneChunk"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); 
cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -984,7 +973,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneCreateCloneChunkSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1000,12 +990,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCreateCloneChunkSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCreateCloneChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCreateCloneChunk, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1020,7 +1008,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1034,12 +1022,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneMeta"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, 
CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1054,7 +1040,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta +// on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneMetaSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1072,12 +1059,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneMetaSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneMeta, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneMeta, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1092,7 +1077,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1108,12 +1093,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRecoverChunk"; 
- CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1128,7 +1111,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart the RecoverChunk phase and partially call RecoverChunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1147,12 +1131,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneRecoverChunkSuccssNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRecoverChunk, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kRecoverChunk, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1167,7 +1149,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneRecoverChunkSuccssNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string 
fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1185,12 +1167,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotCompleteCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1205,7 +1185,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneCompleteCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1227,12 +1208,10 @@ TEST_F(SnapshotCloneServerTest, std::string dstFile = "/RcvItUser1/TestRecoverCloneCompleteCloneFileSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kCompleteCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kCompleteCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1247,7 +1226,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 
ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1267,12 +1246,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotChangeOwner"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1287,7 +1264,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did +// not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1310,12 +1288,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { std::string dstFile = "/RcvItUser1/TestRecoverCloneChangeOwnerSuccessNotReturn"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kChangeOwner, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, 
CloneStep::kChangeOwner, + CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1330,7 +1306,7 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneChangeOwnerSuccessNotReturn) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { std::string uuid1 = UUIDGenerator().GenerateUUID(); std::string fileName = std::string(cloneTempDir_) + "/" + uuid1; @@ -1352,12 +1328,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); std::string dstFile = "/RcvItUser1/TestRecoverCloneHasNotRenameCloneFile"; - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1372,7 +1346,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneRenameCloneFileSuccessNotReturn) { std::string uuid1 = UUIDGenerator().GenerateUUID(); @@ -1400,12 +1375,10 @@ TEST_F(SnapshotCloneServerTest, LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, fInfoOut.id, fileName, dstFile)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kClone, testFile1_, - dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, - CloneFileType::kFile, false, - 
CloneStep::kRenameCloneFile, - CloneStatus::cloning); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kClone, testFile1_, + dstFile, kDefaultPoolset, fInfoOut.id, fInfoOut.id, 0, + CloneFileType::kFile, false, + CloneStep::kRenameCloneFile, CloneStatus::cloning); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1420,18 +1393,16 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(dstFile, testUser1_, fakeData)); } -// 以下为Lazy模式用例 -// CreateCloneFile阶段重启,mds上未创建文件 +// The following are the Lazy pattern use cases +// Reboot during the CreateCloneFile phase, no files were created on the mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); std::string uuid1 = UUIDGenerator().GenerateUUID(); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1451,7 +1422,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneFile阶段重启,mds上创建文件成功未返回 +// Reboot during the CreateCloneFile phase, successful file creation on mds but +// not returned TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasCreateCloneFileSuccessNotReturn) { std::string snapId; @@ -1462,12 +1434,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 0, 0, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneFile, - 
CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, 0, 0, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1487,7 +1457,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上未创建segment +// Reboot during the CreateCloneMeta phase, no segment was created on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1497,12 +1467,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneFile(fileName, &fInfoOut, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1522,7 +1490,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneMeta阶段重启, 在mds上创建segment成功未返回 +// Reboot during the CreateCloneMeta phase, successfully creating segment on mds +// but not returning TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneMetaSuccessNotReturn) { std::string snapId; @@ -1537,12 +1506,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, 
fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1562,7 +1529,8 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,未在chunkserver上创建clonechunk +// Reboot during the CreateCloneChunk phase, cloneChunk not created on +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1576,12 +1544,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneMeta(&fInfoOut, fileName, &segInfoOutVec)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1601,7 +1567,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCreateCloneChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CreateCloneChunk阶段重启,在chunkserver上创建部分clonechunk +// Restart the CreateCloneChunk phase and create a partial clone chunk on the +// chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCreateCloneChunkSuccessNotReturn) { std::string snapId; @@ -1618,12 +1585,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, 
PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCreateCloneChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCreateCloneChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1643,7 +1608,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启 +// CompleteCloneMeta phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1659,12 +1624,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCreateCloneChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1684,7 +1647,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneMeta) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneMeta阶段重启,同时在mds上调用CompleteCloneMeta成功但未返回 +// The CompleteCloneMeta phase was restarted, and the call to CompleteCloneMeta +// on mds was successful but did not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneMetaSuccessNotReturn) { std::string snapId; 
@@ -1703,12 +1667,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneMeta, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneMeta, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1728,7 +1690,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启 +// ChangeOwner phase restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1746,12 +1708,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneMeta(uuid1)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1771,7 +1731,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotChangeOwner) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// ChangeOwner阶段重启,但mds上ChangeOwner成功未返回 +// The ChangeOwner phase restarts, but the ChangeOwner on mds successfully did +// not return TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyChangeOwnerSuccessNotReturn) { std::string snapId; @@ -1792,12 +1753,10 @@ 
TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kChangeOwner, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, CloneStep::kChangeOwner, + CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1817,7 +1776,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启 +// RenameCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1837,12 +1796,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareChangeOwner(fileName)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1862,7 +1819,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRenameCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RenameCloneFile阶段重启,但mds上已RenameCloneFile成功未返回 +// RenameCloneFile stage restarted, but RenameCloneFile successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRenameCloneFileSuccessNotReturn) { std::string snapId; @@ -1886,12 +1844,10 @@ 
TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRenameCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRenameCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1911,7 +1867,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上未调用RecoverChunk +// RecoverChunk phase restarted, RecoverChunk was not called on chunkserver TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -1934,12 +1890,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRenameCloneFile(fInfoOut.id, testFd1_, fileName, testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -1954,7 +1908,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotRecoverChunk) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// RecoverChunk阶段重启,在chunkserver上部分调用RecoverChunk +// Restart the RecoverChunk phase and partially call RecoverChunk on the +// chunkserver 
TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyRecoverChunkSuccssNotReturn) { std::string snapId; @@ -1980,12 +1935,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kRecoverChunk, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kRecoverChunk, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2000,7 +1953,7 @@ TEST_F(SnapshotCloneServerTest, ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启 +// CompleteCloneFile stage restart TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { std::string snapId; PrepareSnapshotForTestFile1(&snapId); @@ -2025,12 +1978,10 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareRecoverChunk(segInfoOutVec, true)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); @@ -2045,7 +1996,8 @@ TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyHasNotCompleteCloneFile) { ASSERT_TRUE(CheckFileData(testFile1_, testUser1_, fakeData)); } -// CompleteCloneFile阶段重启,但mds上CompleteCloneFile已成功未返回 +// CompleteCloneFile stage restarted, but CompleteCloneFile 
successfully did not +// return on mds TEST_F(SnapshotCloneServerTest, TestRecoverCloneLazyCompleteCloneFileSuccessNotReturn) { std::string snapId; @@ -2073,12 +2025,10 @@ TEST_F(SnapshotCloneServerTest, ASSERT_EQ(LIBCURVE_ERROR::OK, PrepareCompleteCloneFile(testFile1_)); - CloneInfo cloneInfo(uuid1, testUser1_, - CloneTaskType::kRecover, snapId, - testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, - CloneFileType::kSnapshot, true, - CloneStep::kCompleteCloneFile, - CloneStatus::recovering); + CloneInfo cloneInfo(uuid1, testUser1_, CloneTaskType::kRecover, snapId, + testFile1_, kDefaultPoolset, fInfoOut.id, testFd1_, 0, + CloneFileType::kSnapshot, true, + CloneStep::kCompleteCloneFile, CloneStatus::recovering); cluster_->metaStore_->AddCloneInfo(cloneInfo); diff --git a/test/mds/topology/test_topology.cpp b/test/mds/topology/test_topology.cpp index c1e878deb4..d139e5c68e 100644 --- a/test/mds/topology/test_topology.cpp +++ b/test/mds/topology/test_topology.cpp @@ -22,25 +22,25 @@ #include -#include "test/mds/topology/mock_topology.h" -#include "src/mds/topology/topology.h" -#include "src/mds/topology/topology_item.h" #include "src/common/configuration.h" #include "src/common/namespace_define.h" +#include "src/mds/topology/topology.h" +#include "src/mds/topology/topology_item.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; -using ::testing::_; -using ::testing::Contains; -using ::testing::SetArgPointee; -using ::testing::SaveArg; -using ::testing::DoAll; using ::curve::common::Configuration; using ::curve::common::kDefaultPoolsetId; using ::curve::common::kDefaultPoolsetName; +using ::testing::_; +using ::testing::Contains; +using ::testing::DoAll; +using ::testing::Return; +using ::testing::SaveArg; +using ::testing::SetArgPointee; class TestTopology : public ::testing::Test { protected: @@ -52,13 +52,11 @@ class TestTopology : public ::testing::Test { tokenGenerator_ = 
std::make_shared(); storage_ = std::make_shared(); topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); const std::unordered_map poolsetMap{ {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; ON_CALL(*storage_, LoadPoolset(_, _)) .WillByDefault(DoAll( @@ -80,128 +78,90 @@ class TestTopology : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(id, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void 
PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 0) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 0) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - uint32_t internalPort = 0, - const std::string &externalHostIp = "testExternalIp", - uint32_t externalPort = 0, - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - internalPort, - externalHostIp, - externalPort, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + uint32_t internalPort = 0, + const 
std::string& externalHostIp = "testExternalIp", + uint32_t externalPort = 0, ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, internalPort, + externalHostIp, externalPort, zoneId, physicalPoolId, + desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void 
PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -218,16 +178,12 @@ class TestTopology : public ::testing::Test { TEST_F(TestTopology, test_init_success) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); const std::unordered_map poolsetMap{ - {kDefaultPoolsetId, - {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}} - }; + {kDefaultPoolsetId, {kDefaultPoolsetId, kDefaultPoolsetName, "", ""}}}; std::unordered_map logicalPoolMap_; std::unordered_map physicalPoolMap_; std::unordered_map zoneMap_; @@ -235,40 +191,33 @@ TEST_F(TestTopology, test_init_success) { std::unordered_map chunkServerMap_; std::map copySetMap_; - logicalPoolMap_[0x01] = LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, false, true); + logicalPoolMap_[0x01] = + LogicalPool(0x01, "lpool1", 0x11, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, false, true); physicalPoolMap_[0x11] = PhysicalPool(0x11, "pPool1", 0X61, "des1"); zoneMap_[0x21] = Zone(0x21, "zone1", 0x11, "desc1"); - serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, - "127.0.0.1", 8200, 0x21, 0x11, "desc1"); - chunkServerMap_[0x41] = ChunkServer(0x41, "token", "ssd", - 0x31, "127.0.0.1", 8200, "/"); + serverMap_[0x31] = Server(0x31, "server1", "127.0.0.1", 8200, 
"127.0.0.1", + 8200, 0x21, 0x11, "desc1"); + chunkServerMap_[0x41] = + ChunkServer(0x41, "token", "ssd", 0x31, "127.0.0.1", 8200, "/"); copySetMap_[std::pair(0x01, 0x51)] = CopySetInfo(0x01, 0x51); EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(poolsetMap), Return(true))); EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(logicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap_), Return(true))); EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(zoneMap_), Return(true))); EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(serverMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(serverMap_), Return(true))); EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(chunkServerMap_), Return(true))); EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(copySetMap_), Return(true))); EXPECT_CALL(*idGenerator_, initPoolsetIdGenerator(_)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -278,10 +227,8 @@ TEST_F(TestTopology, test_init_success) { EXPECT_CALL(*idGenerator_, initChunkServerIdGenerator(_)); EXPECT_CALL(*idGenerator_, initCopySetIdGenerator(_)); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, 
DeleteCopySet(_)).WillOnce(Return(true)); TopologyOption option; int ret = topology_->Init(option); @@ -291,8 +238,7 @@ TEST_F(TestTopology, test_init_success) { TEST_F(TestTopology, test_init_loadClusterFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(false))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(false))); TopologyOption option; int ret = topology_->Init(option); @@ -302,11 +248,9 @@ TEST_F(TestTopology, test_init_loadClusterFail) { TEST_F(TestTopology, test_init_StorageClusterInfoFail) { std::vector infos; EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -318,11 +262,9 @@ TEST_F(TestTopology, test_init_loadLogicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(false)); TopologyOption option; int ret = topology_->Init(option); @@ -334,13 +276,10 @@ TEST_F(TestTopology, test_init_LoadPhysicalPoolFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + 
EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); @@ -354,15 +293,11 @@ TEST_F(TestTopology, test_init_LoadZoneFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -377,16 +312,11 @@ TEST_F(TestTopology, test_init_LoadServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(false)); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -402,19 +332,13 @@ TEST_F(TestTopology, 
test_init_LoadChunkServerFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -431,21 +355,14 @@ TEST_F(TestTopology, test_init_LoadCopysetFail) { ClusterInformation info("uuid1"); infos.push_back(info); EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(DoAll(SetArgPointee<0>(infos), - Return(true))); + .WillOnce(DoAll(SetArgPointee<0>(infos), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, 
_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(false)); EXPECT_CALL(*idGenerator_, initLogicalPoolIdGenerator(_)); EXPECT_CALL(*idGenerator_, initPhysicalPoolIdGenerator(_)); @@ -462,18 +379,11 @@ TEST_F(TestTopology, test_AddLogicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); @@ -487,15 +397,9 @@ TEST_F(TestTopology, test_AddLogicalPool_IdDuplicated) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "test1", physicalPoolId); - LogicalPool pool(id, - "test2", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(id, "test2", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -506,18 +410,11 @@ TEST_F(TestTopology, test_AddLogicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(0x01, "test1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + 
LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddLogicalPool(pool); @@ -528,16 +425,9 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - LogicalPool pool(0x01, - "test1", - ++physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); - + LogicalPool pool(0x01, "test1", ++physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->AddLogicalPool(pool); @@ -546,26 +436,18 @@ TEST_F(TestTopology, test_AddLogicalPool_PhysicalPoolNotFound) { TEST_F(TestTopology, test_AddPhysicalPool_success) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { PrepareAddPoolset(); PoolIdType id = 0x11; PoolsetIdType pid = 0x61; - PhysicalPool pool(id, - "test1", - pid, - "desc"); + PhysicalPool pool(id, "test1", pid, "desc"); PrepareAddPhysicalPool(id); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeIdDuplicated, ret); @@ -573,12 +455,8 @@ TEST_F(TestTopology, test_AddPhysicalPool_IdDuplicated) { TEST_F(TestTopology, test_AddPhysicalPool_StorageFail) { PrepareAddPoolset(); - PhysicalPool pool(0x11, - "test1", - 0X61, - "desc"); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(false)); + PhysicalPool pool(0x11, "test1", 0X61, "desc"); + EXPECT_CALL(*storage_, 
StoragePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -590,13 +468,9 @@ TEST_F(TestTopology, test_AddZone_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); @@ -616,10 +490,7 @@ TEST_F(TestTopology, test_AddZone_IdDuplicated) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -631,13 +502,9 @@ TEST_F(TestTopology, test_AddZone_StorageFail) { PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - Zone zone(0x21, - "testZone", - physicalPoolId, - "desc"); + Zone zone(0x21, "testZone", physicalPoolId, "desc"); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(false)); int ret = topology_->AddZone(zone); @@ -649,11 +516,7 @@ TEST_F(TestTopology, test_AddZone_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; - Zone zone(zoneId, - "testZone", - physicalPoolId, - "desc"); - + Zone zone(zoneId, "testZone", physicalPoolId, "desc"); int ret = topology_->AddZone(zone); @@ -668,18 +531,10 @@ TEST_F(TestTopology, test_AddServer_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - 
physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -701,15 +556,8 @@ TEST_F(TestTopology, test_AddServer_IdDuplicated) { PrepareAddZone(zoneId, "test", physicalPoolId); PrepareAddServer(id); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); @@ -724,46 +572,29 @@ TEST_F(TestTopology, test_AddServer_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "test", physicalPoolId); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(false)); - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, test_AddServer_ZoneNotFound) { PrepareAddPoolset(); ServerIdType id = 0x31; PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; - Server server(id, - "server1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc"); + Server server(id, "server1", "ip1", 0, "ip2", 0, zoneId, physicalPoolId, + "desc"); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); } - TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddPoolset(); ChunkServerIdType csId = 0x41; @@ -773,20 +604,13 @@ TEST_F(TestTopology, test_AddChunkServers_success) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); ChunkServerState state; state.SetDiskCapacity(1024); 
state.SetDiskUsed(512); cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); @@ -812,18 +636,9 @@ TEST_F(TestTopology, test_AddChunkServer_IdDuplicated) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token2", - "ssd", - serverId); - - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + PrepareAddChunkServer(csId, "token2", "ssd", serverId); + + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -839,16 +654,9 @@ TEST_F(TestTopology, test_AddChunkServer_StorageFail) { PrepareAddZone(); PrepareAddServer(serverId); - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(false)); int ret = topology_->AddChunkServer(cs); @@ -860,13 +668,7 @@ TEST_F(TestTopology, test_AddChunkServer_ServerNotFound) { ChunkServerIdType csId = 0x41; ServerIdType serverId = 0x31; - ChunkServer cs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); + ChunkServer cs(csId, "token", "ssd", serverId, "ip1", 100, "/"); int ret = topology_->AddChunkServer(cs); @@ -880,8 +682,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_success) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemoveLogicalPool(id); @@ -904,8 +705,7 @@ TEST_F(TestTopology, test_RemoveLogicalPool_StorageFail) { PoolIdType id = 0x01; PrepareAddLogicalPool(id, "name", physicalPoolId); - 
EXPECT_CALL(*storage_, DeleteLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemoveLogicalPool(id); @@ -917,8 +717,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_success) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->RemovePhysicalPool(poolId); @@ -939,8 +738,7 @@ TEST_F(TestTopology, test_RemovePhysicalPool_StorageFail) { PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - EXPECT_CALL(*storage_, DeletePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeletePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->RemovePhysicalPool(poolId); @@ -952,12 +750,9 @@ TEST_F(TestTopology, test_RemoveZone_success) { ZoneIdType zoneId = 0x21; PoolIdType poolId = 0x11; PrepareAddPhysicalPool(poolId); - PrepareAddZone(zoneId, - "testZone", - poolId); + PrepareAddZone(zoneId, "testZone", poolId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(true)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -982,8 +777,7 @@ TEST_F(TestTopology, test_RemoveZone_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - EXPECT_CALL(*storage_, DeleteZone(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteZone(_)).WillOnce(Return(false)); int ret = topology_->RemoveZone(zoneId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -995,16 +789,9 @@ TEST_F(TestTopology, test_RemoveServer_success) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - 
.WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteServer(_)).WillOnce(Return(true)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1030,16 +817,9 @@ TEST_F(TestTopology, test_RemoveServer_StorageFail) { ZoneIdType zoneId = 0x21; PrepareAddPhysicalPool(); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "testSever", - "ip1", - 0, - "ip2", - 0, - zoneId); + PrepareAddServer(serverId, "testSever", "ip1", 0, "ip2", 0, zoneId); - EXPECT_CALL(*storage_, DeleteServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteServer(_)).WillOnce(Return(false)); int ret = topology_->RemoveServer(serverId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1052,18 +832,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(true)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1075,7 +851,6 @@ TEST_F(TestTopology, test_RemoveChunkServer_success) { ASSERT_TRUE(it == csList.end()); } - TEST_F(TestTopology, test_RemoveChunkServer_ChunkSeverNotFound) { ChunkServerIdType csId = 0x41; @@ -1090,19 +865,14 @@ TEST_F(TestTopology, test_RemoveChunkServer_StorageFail) { PrepareAddPhysicalPool(); PrepareAddZone(); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId); + PrepareAddChunkServer(csId, "token", "ssd", serverId); - int ret = topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId); + int ret = + 
topology_->UpdateChunkServerRwState(ChunkServerStatus::RETIRED, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - - EXPECT_CALL(*storage_, DeleteChunkServer(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteChunkServer(_)).WillOnce(Return(false)); ret = topology_->RemoveChunkServer(csId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); @@ -1113,26 +883,15 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdateLogicalPool(pool); @@ -1146,15 +905,9 @@ TEST_F(TestTopology, UpdateLogicalPool_success) { TEST_F(TestTopology, UpdateLogicalPool_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); int ret = topology_->UpdateLogicalPool(pool); @@ -1166,26 +919,15 @@ TEST_F(TestTopology, 
UpdateLogicalPool_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdateLogicalPool(pool); @@ -1197,24 +939,19 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); LogicalPool pool2; topology_->GetLogicalPool(logicalPoolId, &pool2); ASSERT_EQ(AllocateStatus::ALLOW, pool2.GetStatus()); // update to deny - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); 
ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1223,11 +960,10 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { ASSERT_EQ(AllocateStatus::DENY, pool3.GetStatus()); // update to allow - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); - ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1239,18 +975,12 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_success) { TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_LogicalPoolNotFound) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; - LogicalPool pool(logicalPoolId, - "name1", - physicalPoolId, - APPENDFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0, - true, - true); + LogicalPool pool(logicalPoolId, "name1", physicalPoolId, APPENDFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0, true, true); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeLogicalPoolNotFound, ret); } @@ -1260,19 +990,14 @@ TEST_F(TestTopology, UpdateLogicalPoolAllocateStatus_StorageFail) { PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddLogicalPool(logicalPoolId, - "name1", - physicalPoolId, - PAGEFILE, - LogicalPool::RedundanceAndPlaceMentPolicy(), - LogicalPool::UserPolicy(), - 0); + PrepareAddLogicalPool(logicalPoolId, "name1", physicalPoolId, PAGEFILE, + LogicalPool::RedundanceAndPlaceMentPolicy(), + LogicalPool::UserPolicy(), 0); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, 
UpdateLogicalPool(_)).WillOnce(Return(false)); - int ret = topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::ALLOW, logicalPoolId); + int ret = topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::ALLOW, + logicalPoolId); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1285,8 +1010,7 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { PrepareAddLogicalPool(lpid, "name", ppid); auto set_state = [&](PoolIdType lpid, bool scanEnable) { - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(true)); auto retCode = topology_->UpdateLogicalPoolScanState(lpid, scanEnable); ASSERT_EQ(retCode, kTopoErrCodeSuccess); }; @@ -1309,14 +1033,12 @@ TEST_F(TestTopology, TestUpdateLogicalPoolScanState) { check_state(lpid, true); // CASE 4: logical pool not found -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .Times(0); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).Times(0); auto retCode = topology_->UpdateLogicalPoolScanState(lpid + 1, true); ASSERT_EQ(retCode, kTopoErrCodeLogicalPoolNotFound); // CASE 5: update storage fail -> set scan state fail - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdateLogicalPool(_)).WillOnce(Return(false)); retCode = topology_->UpdateLogicalPoolScanState(lpid, true); ASSERT_EQ(retCode, kTopoErrCodeStorgeFail); } @@ -1325,18 +1047,11 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, 
UpdatePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1349,69 +1064,45 @@ TEST_F(TestTopology, UpdatePhysicalPool_success) { TEST_F(TestTopology, UpdatePhysicalPool_PhysicalPoolNotFound) { PoolIdType physicalPoolId = 0x11; PoolIdType pid = 0x61; - PhysicalPool newPool(physicalPoolId, - "name1", - pid, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", pid, "desc2"); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodePhysicalPoolNotFound, ret); } - TEST_F(TestTopology, UpdatePhysicalPool_StorageFail) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; PoolsetIdType poolsetId = 0x61; - PrepareAddPhysicalPool(physicalPoolId, - "name1", - poolsetId, - "desc1"); + PrepareAddPhysicalPool(physicalPoolId, "name1", poolsetId, "desc1"); - PhysicalPool newPool(physicalPoolId, - "name1", - poolsetId, - "desc2"); + PhysicalPool newPool(physicalPoolId, "name1", poolsetId, "desc2"); - EXPECT_CALL(*storage_, UpdatePhysicalPool(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, UpdatePhysicalPool(_)).WillOnce(Return(false)); int ret = topology_->UpdatePhysicalPool(newPool); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - - TEST_F(TestTopology, UpdateZone_success) { PrepareAddPoolset(); ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(true)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(true)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - TEST_F(TestTopology, UpdateZone_ZoneNotFound) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; - Zone 
newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeZoneNotFound, ret); @@ -1422,18 +1113,11 @@ TEST_F(TestTopology, UpdateZone_StorageFail) { ZoneIdType zoneId = 0x21; PoolIdType physicalPoolId = 0x11; PrepareAddPhysicalPool(physicalPoolId); - PrepareAddZone(zoneId, - "name1", - physicalPoolId, - "desc1"); - - Zone newZone(zoneId, - "name1", - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateZone(_)) - .WillOnce(Return(false)); + PrepareAddZone(zoneId, "name1", physicalPoolId, "desc1"); + + Zone newZone(zoneId, "name1", physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateZone(_)).WillOnce(Return(false)); int ret = topology_->UpdateZone(newZone); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1445,28 +1129,13 @@ TEST_F(TestTopology, UpdateServer_success) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeSuccess, ret); @@ -1477,15 +1146,8 @@ TEST_F(TestTopology, UpdateServer_ServerNotFound) { ZoneIdType zoneId = 0x21; ServerIdType serverId = 0x31; - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); int ret = 
topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeServerNotFound, ret); @@ -1498,34 +1160,18 @@ TEST_F(TestTopology, UpdateServer_StorageFail) { ServerIdType serverId = 0x31; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc1"); - - Server newServer(serverId, - "name1", - "ip1", - 0, - "ip2", - 0, - zoneId, - physicalPoolId, - "desc2"); - - EXPECT_CALL(*storage_, UpdateServer(_)) - .WillOnce(Return(false)); + PrepareAddServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc1"); + + Server newServer(serverId, "name1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId, "desc2"); + + EXPECT_CALL(*storage_, UpdateServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateServer(newServer); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } - TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1535,24 +1181,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1566,28 +1199,15 @@ TEST_F(TestTopology, UpdateChunkServerTopo_UpdateServerSuccess) { ChunkServerIdType csId = 0x41; PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); - PrepareAddServer(serverId, "server1", - "ip1", 0, "ip2", 0, zoneId, physicalPoolId); - 
PrepareAddServer(serverId2, "server2", - "ip3", 0, "ip4", 0, zoneId, physicalPoolId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId2, - "ip3", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + PrepareAddServer(serverId, "server1", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); + PrepareAddServer(serverId2, "server2", "ip3", 0, "ip4", 0, zoneId, + physicalPoolId); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId2, "ip3", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -1596,13 +1216,7 @@ TEST_F(TestTopology, UpdateChunkServerTopo_ChunkServerNotFound) { ServerIdType serverId = 0x31; ChunkServerIdType csId = 0x41; - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); @@ -1617,24 +1231,11 @@ TEST_F(TestTopology, UpdateChunkServerTopo_StorageFail) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/"); - - ChunkServer newCs(csId, - "token", - "ssd", - serverId, - "ip1", - 100, - "/abc"); - - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(false)); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "ip1", 100, "/"); + + ChunkServer newCs(csId, "token", "ssd", serverId, "ip1", 100, "/abc"); + + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(false)); int ret = topology_->UpdateChunkServerTopo(newCs); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -1648,11 +1249,7 @@ 
TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); @@ -1662,17 +1259,16 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_success) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(100, pool.GetDiskCapacity()); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - .WillOnce(Return(true)); + // Only brush once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep, waiting to flush the database sleep(5); topology_->Stop(); } @@ -1684,7 +1280,7 @@ TEST_F(TestTopology, UpdateChunkServerDiskStatus_ChunkServerNotFound) { csState.SetDiskState(DISKERROR); csState.SetDiskCapacity(100); - int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); + int ret = topology_->UpdateChunkServerDiskStatus(csState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1697,22 +1293,17 @@ TEST_F(TestTopology, UpdateChunkServerRwStateToStorage_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateChunkServer(_)) - 
.WillOnce(Return(true)); + // Only brush once + EXPECT_CALL(*storage_, UpdateChunkServer(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep, waiting to flush the database sleep(5); topology_->Stop(); } @@ -1726,60 +1317,50 @@ TEST_F(TestTopology, UpdateChunkServerRwStateTestPhysicalPoolCapacity_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); PhysicalPool pool; ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // READWRITE -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> RETIRED - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::RETIRED, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::RETIRED, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(0, pool.GetDiskCapacity()); // RETIRED -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, 
pool.GetDiskCapacity()); // READWRITE -> PENDDING - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::PENDDING, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::PENDDING, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); // PENDDING -> READWRITE - ASSERT_EQ(kTopoErrCodeSuccess, - topology_->UpdateChunkServerRwState( - ChunkServerStatus::READWRITE, csId)); + ASSERT_EQ(kTopoErrCodeSuccess, topology_->UpdateChunkServerRwState( + ChunkServerStatus::READWRITE, csId)); ASSERT_TRUE(topology_->GetPhysicalPool(0x11, &pool)); ASSERT_EQ(1024, pool.GetDiskCapacity()); @@ -1790,7 +1371,7 @@ TEST_F(TestTopology, UpdateChunkServerRwState_ChunkServerNotFound) { ChunkServerStatus rwState; rwState = ChunkServerStatus::PENDDING; - int ret = topology_->UpdateChunkServerRwState(rwState, csId); + int ret = topology_->UpdateChunkServerRwState(rwState, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1803,13 +1384,9 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId); PrepareAddServer(serverId); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/"); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/"); uint64_t time = 0x1234567812345678; - int ret = topology_->UpdateChunkServerStartUpTime(time, csId); + int ret = topology_->UpdateChunkServerStartUpTime(time, csId); ASSERT_EQ(kTopoErrCodeSuccess, ret); ChunkServer cs; @@ -1819,7 +1396,7 @@ TEST_F(TestTopology, UpdateChunkServerStartUpTime_success) { TEST_F(TestTopology, UpdateChunkServerStartUpTime_ChunkServerNotFound) { ChunkServerIdType csId = 0x41; - int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); + int ret = topology_->UpdateChunkServerStartUpTime(1000, csId); ASSERT_EQ(kTopoErrCodeChunkServerNotFound, ret); } @@ -1831,19 +1408,18 @@ TEST_F(TestTopology, 
FindLogicalPool_success) { std::string physicalPoolName = "PhysiclPool1"; PrepareAddPhysicalPool(physicalPoolId, physicalPoolName); PrepareAddLogicalPool(logicalPoolId, logicalPoolName, physicalPoolId); - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); ASSERT_EQ(logicalPoolId, ret); } TEST_F(TestTopology, FindLogicalPool_LogicalPoolNotFound) { std::string logicalPoolName = "logicalPool1"; std::string physicalPoolName = "PhysiclPool1"; - PoolIdType ret = topology_->FindLogicalPool(logicalPoolName, - physicalPoolName); + PoolIdType ret = + topology_->FindLogicalPool(logicalPoolName, physicalPoolName); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindPhysicalPool_success) { @@ -1858,11 +1434,9 @@ TEST_F(TestTopology, FindPhysicalPool_success) { TEST_F(TestTopology, FindPhysicalPool_PhysicalPoolNotFound) { std::string physicalPoolName = "physicalPoolName"; PoolIdType ret = topology_->FindPhysicalPool(physicalPoolName); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } - TEST_F(TestTopology, FindZone_success) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -1879,8 +1453,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret = topology_->FindZone(zoneName, physicalPoolName); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindZone_success2) { @@ -1900,8 +1473,7 @@ TEST_F(TestTopology, FindZone_ZoneNotFound2) { std::string physicalPoolName = "physicalPoolName"; std::string zoneName = "zoneName"; ZoneIdType ret = topology_->FindZone(zoneName, physicalPoolId); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } 
TEST_F(TestTopology, FindServerByHostName_success) { @@ -1910,8 +1482,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { std::string hostName = "host1"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName); + PrepareAddServer(serverId, hostName); ServerIdType ret = topology_->FindServerByHostName(hostName); ASSERT_EQ(serverId, ret); @@ -1920,8 +1491,7 @@ TEST_F(TestTopology, FindServerByHostName_success) { TEST_F(TestTopology, FindServerByHostName_ServerNotFound) { std::string hostName = "host1"; ServerIdType ret = topology_->FindServerByHostName(hostName); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindServerByHostIpPort_success) { @@ -1932,12 +1502,7 @@ TEST_F(TestTopology, FindServerByHostIpPort_success) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort(internalHostIp, 0); ASSERT_EQ(serverId, ret); @@ -1954,16 +1519,10 @@ TEST_F(TestTopology, FindSeverByHostIp_ServerNotFound) { std::string externalHostIp = "ip2"; PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); ServerIdType ret = topology_->FindServerByHostIpPort("ip3", 0); - ASSERT_EQ(static_cast(UNINTIALIZE_ID), - ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, FindChunkServerNotRetired_success) { @@ -1977,21 +1536,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_success) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - 
serverId, - "/", - port); - - ChunkServerIdType ret = topology_->FindChunkServerNotRetired( - internalHostIp, port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); + + ChunkServerIdType ret = + topology_->FindChunkServerNotRetired(internalHostIp, port); ASSERT_EQ(csId, ret); } @@ -2006,22 +1555,11 @@ TEST_F(TestTopology, FindChunkServerNotRetired_ChunkServerNotFound) { PrepareAddPhysicalPool(); PrepareAddZone(); - PrepareAddServer(serverId, - hostName, - internalHostIp, - 0, - externalHostIp, - 0); - PrepareAddChunkServer(csId, - "token", - "ssd", - serverId, - "/", - port); + PrepareAddServer(serverId, hostName, internalHostIp, 0, externalHostIp, 0); + PrepareAddChunkServer(csId, "token", "ssd", serverId, "/", port); ChunkServerIdType ret = topology_->FindChunkServerNotRetired("ip3", port); - ASSERT_EQ(static_cast( - UNINTIALIZE_ID), ret); + ASSERT_EQ(static_cast(UNINTIALIZE_ID), ret); } TEST_F(TestTopology, GetLogicalPool_success) { @@ -2089,7 +1627,6 @@ TEST_F(TestTopology, GetServer_success) { ASSERT_EQ(true, ret); } - TEST_F(TestTopology, GetServer_GetServerNotFound) { PrepareAddPoolset(); PoolIdType physicalPoolId = 0x11; @@ -2133,7 +1670,6 @@ TEST_F(TestTopology, GetChunkServer_ChunkServerNotFound) { ASSERT_EQ(false, ret); } - TEST_F(TestTopology, GetChunkServerInCluster_success) { PoolIdType physicalPoolId = 0x11; ZoneIdType zoneId = 0x21; @@ -2371,8 +1907,8 @@ TEST_F(TestTopology, GetChunkServerInLogicalPool_success) { PrepareAddPoolset(); PrepareAddPhysicalPool(physicalPoolId); PrepareAddZone(zoneId, "name", physicalPoolId); - PrepareAddServer( - serverId, "name2", "ip1", 0, "ip2", 0, zoneId, physicalPoolId); + PrepareAddServer(serverId, "name2", "ip1", 0, "ip2", 0, zoneId, + physicalPoolId); PrepareAddChunkServer(csId, "token", "ssd", serverId); PrepareAddChunkServer(csId2, "token", "ssd", serverId); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", 
physicalPoolId); @@ -2452,12 +1988,12 @@ TEST_F(TestTopology, AddCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2470,8 +2006,7 @@ TEST_F(TestTopology, AddCopySet_success) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeSuccess, ret); } @@ -2486,12 +2021,12 @@ TEST_F(TestTopology, AddCopySet_IdDuplicated) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, 
"127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2519,12 +2054,12 @@ TEST_F(TestTopology, AddCopySet_LogicalPoolNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2551,12 +2086,12 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, 
"token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2569,8 +2104,7 @@ TEST_F(TestTopology, AddCopySet_StorageFail) { CopySetInfo csInfo(logicalPoolId, copysetId); csInfo.SetCopySetMembers(replicas); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(false)); int ret = topology_->AddCopySet(csInfo); ASSERT_EQ(kTopoErrCodeStorgeFail, ret); } @@ -2585,12 +2119,12 @@ TEST_F(TestTopology, RemoveCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2601,8 +2135,7 @@ TEST_F(TestTopology, RemoveCopySet_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(true)); int ret = topology_->RemoveCopySet( std::pair(logicalPoolId, copysetId)); @@ -2620,12 +2153,12 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - 
PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2636,8 +2169,7 @@ TEST_F(TestTopology, RemoveCopySet_storageFail) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - EXPECT_CALL(*storage_, DeleteCopySet(_)) - .WillOnce(Return(false)); + EXPECT_CALL(*storage_, DeleteCopySet(_)).WillOnce(Return(false)); int ret = topology_->RemoveCopySet( std::pair(logicalPoolId, copysetId)); @@ -2655,12 +2187,12 @@ TEST_F(TestTopology, RemoveCopySet_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 
0x33, "127.0.0.1", 8200); @@ -2687,12 +2219,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2716,11 +2248,10 @@ TEST_F(TestTopology, UpdateCopySetTopo_success) { ASSERT_EQ(kTopoErrCodeSuccess, ret); - // 只刷一次 - EXPECT_CALL(*storage_, UpdateCopySet(_)) - .WillOnce(Return(true)); + // Only brush once + EXPECT_CALL(*storage_, UpdateCopySet(_)).WillOnce(Return(true)); topology_->Run(); - // sleep 等待刷数据库 + // Sleep, waiting to flush the database sleep(5); topology_->Stop(); } @@ -2735,12 +2266,12 @@ TEST_F(TestTopology, UpdateCopySetTopo_CopySetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); 
+ PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2775,12 +2306,12 @@ TEST_F(TestTopology, GetCopySet_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2809,12 +2340,12 @@ TEST_F(TestTopology, GetCopySet_CopysetNotFound) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 
0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2843,12 +2374,12 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2860,7 +2391,7 @@ TEST_F(TestTopology, GetCopySetsInLogicalPool_success) { PrepareAddCopySet(copysetId, logicalPoolId, replicas); std::vector csList = - topology_->GetCopySetsInLogicalPool(logicalPoolId); + topology_->GetCopySetsInLogicalPool(logicalPoolId); ASSERT_EQ(1, csList.size()); } @@ -2874,12 +2405,12 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, 
"server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2890,8 +2421,7 @@ TEST_F(TestTopology, GetCopySetsInCluster_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector csList = - topology_->GetCopySetsInCluster(); + std::vector csList = topology_->GetCopySetsInCluster(); ASSERT_EQ(1, csList.size()); } @@ -2905,12 +2435,12 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { PrepareAddZone(0x21, "zone1", physicalPoolId); PrepareAddZone(0x22, "zone2", physicalPoolId); PrepareAddZone(0x23, "zone3", physicalPoolId); - PrepareAddServer( - 0x31, "server1", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x21, 0x11); - PrepareAddServer( - 0x32, "server2", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x22, 0x11); - PrepareAddServer( - 0x33, "server3", "127.0.0.1" , 0, "127.0.0.1" , 0, 0x23, 0x11); + PrepareAddServer(0x31, "server1", "127.0.0.1", 0, "127.0.0.1", 0, 0x21, + 0x11); + PrepareAddServer(0x32, "server2", "127.0.0.1", 0, "127.0.0.1", 0, 0x22, + 0x11); + PrepareAddServer(0x33, "server3", "127.0.0.1", 0, "127.0.0.1", 0, 0x23, + 0x11); PrepareAddChunkServer(0x41, "token1", "nvme", 0x31, "127.0.0.1", 8200); PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); @@ -2921,44 +2451,33 @@ TEST_F(TestTopology, GetCopySetsInChunkServer_success) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - std::vector csList = - topology_->GetCopySetsInChunkServer(0x41); + std::vector csList = topology_->GetCopySetsInChunkServer(0x41); ASSERT_EQ(1, csList.size()); } TEST_F(TestTopology, test_create_default_poolset) { - 
EXPECT_CALL(*storage_, LoadClusterInfo(_)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, StorageClusterInfo(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadClusterInfo(_)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageClusterInfo(_)).WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadPoolset(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadPoolset(_, _)).WillOnce(Return(true)); Poolset poolset; EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce( - DoAll(SaveArg<0>(&poolset), Return(true))); + .WillOnce(DoAll(SaveArg<0>(&poolset), Return(true))); std::unordered_map physicalPoolMap{ {1, {1, "pool1", UNINTIALIZE_ID, ""}}, {2, {2, "pool2", UNINTIALIZE_ID, ""}}, }; EXPECT_CALL(*storage_, LoadPhysicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), - SetArgPointee<1>(2), + .WillOnce(DoAll(SetArgPointee<0>(physicalPoolMap), SetArgPointee<1>(2), Return(true))); - EXPECT_CALL(*storage_, LoadLogicalPool(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadZone(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadChunkServer(_, _)) - .WillOnce(Return(true)); - EXPECT_CALL(*storage_, LoadCopySet(_, _)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadLogicalPool(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadZone(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadChunkServer(_, _)).WillOnce(Return(true)); + EXPECT_CALL(*storage_, LoadCopySet(_, _)).WillOnce(Return(true)); int rc = topology_->Init({}); ASSERT_EQ(kTopoErrCodeSuccess, rc); diff --git a/test/mds/topology/test_topology_chunk_allocator.cpp b/test/mds/topology/test_topology_chunk_allocator.cpp index a1ea8aa942..08289623f4 100644 --- a/test/mds/topology/test_topology_chunk_allocator.cpp +++ b/test/mds/topology/test_topology_chunk_allocator.cpp @@ -20,30 +20,28 @@ * Author: 
xuchaojie */ -#include #include +#include #include - -#include "src/mds/topology/topology_chunk_allocator.h" -#include "src/mds/common/mds_define.h" -#include "test/mds/topology/mock_topology.h" -#include "test/mds/mock/mock_topology.h" #include "proto/nameserver2.pb.h" #include "src/common/timeutility.h" +#include "src/mds/common/mds_define.h" +#include "src/mds/topology/topology_chunk_allocator.h" #include "test/mds/mock/mock_alloc_statistic.h" +#include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; using ::testing::Invoke; - +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyChunkAllocator : public ::testing::Test { protected: @@ -54,21 +52,17 @@ class TestTopologyChunkAllocator : public ::testing::Test { tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); TopologyOption option; topoStat_ = std::make_shared(topology_); - chunkFilePoolAllocHelp_ = - std::make_shared(); + chunkFilePoolAllocHelp_ = std::make_shared(); chunkFilePoolAllocHelp_->UpdateChunkFilePoolAllocConfig(true, true, 15); option.PoolUsagePercentLimit = 85; option.enableLogicalPoolStatus = true; allocStatistic_ = std::make_shared(); - testObj_ = std::make_shared(topology_, - allocStatistic_, - topoStat_, - chunkFilePoolAllocHelp_, - option); + testObj_ = std::make_shared( + topology_, allocStatistic_, topoStat_, chunkFilePoolAllocHelp_, + option); } virtual void TearDown() { @@ -85,53 +79,37 @@ class TestTopologyChunkAllocator : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, 
StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool", - uint64_t diskCapacity = 10240) { - PhysicalPool pool(id, - name, - pid, - desc); + const std::string& name = "testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool", + uint64_t diskCapacity = 10240) { + PhysicalPool pool(id, name, pid, desc); pool.SetDiskCapacity(diskCapacity); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = 
topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) @@ -139,78 +117,56 @@ class TestTopologyChunkAllocator : public ::testing::Test { } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } - void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - 
ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/", - uint64_t diskUsed = 512, - uint64_t diskCapacity = 1024) { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState state; - state.SetDiskCapacity(diskCapacity); - state.SetDiskUsed(diskUsed); - cs.SetChunkServerState(state); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + void PrepareAddChunkServer( + ChunkServerIdType id = 0x41, const std::string& token = "testToken", + const std::string& diskType = "nvme", ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", uint32_t port = 0, + const std::string& diskPath = "/", uint64_t diskUsed = 512, + uint64_t diskCapacity = 1024) { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState state; + state.SetDiskCapacity(diskCapacity); + state.SetDiskUsed(diskUsed); + cs.SetChunkServerState(state); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ChunkServerStat stat; - stat.chunkFilepoolSize = diskCapacity-diskUsed; + stat.chunkFilepoolSize = diskCapacity - diskUsed; topoStat_->UpdateChunkServerStat(id, stat); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members, - bool availFlag = true) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members, + bool availFlag = true) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); cs.SetAvailableFlag(availFlag); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, 
ret) << "should have PrepareAddLogicalPool()"; @@ -228,7 +184,7 @@ class TestTopologyChunkAllocator : public ::testing::Test { }; TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_success) { + Test_AllocateChunkRandomInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -247,7 +203,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -258,12 +214,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -275,20 +227,16 @@ TEST_F(TestTopologyChunkAllocator, } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRandomInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { + Test_AllocateChunkRandomInSingleLogicalPool_shouldfail) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -304,7 +252,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); 
PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -315,12 +263,8 @@ TEST_F(TestTopologyChunkAllocator, EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); @@ -328,12 +272,8 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_TRUE(ret); @@ -343,18 +283,14 @@ TEST_F(TestTopologyChunkAllocator, topoStat_->UpdateChunkServerStat(0x42, stat); topoStat_->UpdateChunkServerStat(0x43, stat); - ret = - testObj_->AllocateChunkRandomInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 2, - 1024, - &infos); + ret = testObj_->AllocateChunkRandomInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 2, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { + Test_GetRemainingSpaceInLogicalPool_UseChunkFilePool) { std::vector infos; PoolIdType logicalPoolId = 0x01; @@ -370,7 +306,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddServer(0x32, "server2", "127.0.0.1", "127.0.0.1", 0x22, 0x11); PrepareAddServer(0x33, "server3", "127.0.0.1", "127.0.0.1", 0x23, 0x11); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -385,16 +321,16 @@ 
TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); std::map enoughsize; - std::vector pools ={0x01}; + std::vector pools = {0x01}; for (int i = 0; i < 10; i++) { - testObj_->GetRemainingSpaceInLogicalPool(pools, - &enoughsize, "testPoolset"); + testObj_->GetRemainingSpaceInLogicalPool(pools, &enoughsize, + "testPoolset"); ASSERT_EQ(enoughsize[logicalPoolId], 1109); } } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_success) { std::vector infos; PrepareAddPoolset(); @@ -412,7 +348,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -423,16 +359,11 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_TRUE(ret); @@ -443,12 +374,8 @@ TEST_F(TestTopologyChunkAllocator, // second time std::vector infos2; - ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos2); + ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos2); ASSERT_TRUE(ret); @@ -493,20 +420,16 @@ TEST_F(TestTopologyChunkAllocator, } 
TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolNotFound) { std::vector infos; - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_copysetEmpty) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -514,18 +437,14 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddPhysicalPool(physicalPoolId); PrepareAddLogicalPool(logicalPoolId); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 1, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 1, 1024, &infos); ASSERT_FALSE(ret); } TEST_F(TestTopologyChunkAllocator, - Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { + Test_AllocateChunkRoundRobinInSingleLogicalPool_logicalPoolIsDENY) { std::vector infos; PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; @@ -542,7 +461,7 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddChunkServer(0x42, "token2", "nvme", 0x32, "127.0.0.1", 8200); PrepareAddChunkServer(0x43, "token3", "nvme", 0x33, "127.0.0.1", 8200); PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE); + PAGEFILE); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); @@ -553,27 +472,23 @@ TEST_F(TestTopologyChunkAllocator, PrepareAddCopySet(0x54, logicalPoolId, replicas); PrepareAddCopySet(0x55, logicalPoolId, replicas); - EXPECT_CALL(*storage_, UpdateLogicalPool(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, 
UpdateLogicalPool(_)).WillOnce(Return(true)); - topology_->UpdateLogicalPoolAllocateStatus( - AllocateStatus::DENY, logicalPoolId); + topology_->UpdateLogicalPoolAllocateStatus(AllocateStatus::DENY, + logicalPoolId); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) .WillRepeatedly(Return(true)); - bool ret = - testObj_->AllocateChunkRoundRobinInSingleLogicalPool(INODE_PAGEFILE, - "testPoolset", - 3, - 1024, - &infos); + bool ret = testObj_->AllocateChunkRoundRobinInSingleLogicalPool( + INODE_PAGEFILE, "testPoolset", 3, 1024, &infos); ASSERT_FALSE(ret); } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, with 64 chunks allocated each + // time std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 2000; i++) { @@ -584,12 +499,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { for (int i = 0; i < 100000; i++) { int chunkNumber = 64; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( + copySetIds, 1, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(chunkNumber, infos.size()); for (int j = 0; j < chunkNumber; j++) { @@ -598,7 +509,7 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } int minCount = copySetMap[0]; int maxCount = copySetMap[0]; - for (auto &pair : copySetMap) { + for (auto& pair : copySetMap) { if (pair.second > maxCount) { maxCount = pair.second; } @@ -610,10 +521,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { double minPercent = static_cast(avg - minCount) / avg; double maxPercent = static_cast(maxCount - avg) / avg; LOG(INFO) << "AllocateChunkRandomInSingleLogicalPool poc" - <<", minCount = " << minCount - <<", maxCount = " << maxCount - << ", avg = " << 
avg - << ", minPercent = " << minPercent + << ", minCount = " << minCount << ", maxCount = " << maxCount + << ", avg = " << avg << ", minPercent = " << minPercent << ", maxPercent = " << maxPercent; ASSERT_TRUE(minPercent < 0.1); @@ -621,7 +530,8 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolPoc) { } TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { - // 2000个copyset分配100000次,每次分配64个chunk + // 2000 copysets are allocated 100000 times, + // with 64 chunks allocated each time std::vector copySetIds; for (int i = 0; i < 2000; i++) { copySetIds.push_back(i); @@ -632,23 +542,19 @@ TEST(TestAllocateChunkPolicy, TestAllocateChunkRandomInSingleLogicalPoolTps) { int chunkNumber = 64; std::vector infos; AllocateChunkPolicy::AllocateChunkRandomInSingleLogicalPool( - copySetIds, - 1, - chunkNumber, - &infos); + copySetIds, 1, chunkNumber, &infos); } uint64_t stoptime = curve::common::TimeUtility::GetTimeofDayUs(); double usetime = stoptime - startime; - double tps = 1000000.0 * 100000.0/usetime; + double tps = 1000000.0 * 100000.0 / usetime; - std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " - << tps + std::cout << "TestAllocateChunkRandomInSingleLogicalPool, TPS = " << tps << " * 64 chunk per second."; } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { + TestAllocateChunkRoundRobinInSingleLogicalPoolSuccess) { std::vector copySetIds; std::map copySetMap; for (int i = 0; i < 20; i++) { @@ -657,13 +563,8 @@ TEST(TestAllocateChunkPolicy, uint32_t nextIndex = 15; int chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_TRUE(ret); ASSERT_EQ(5, nextIndex); ASSERT_EQ(chunkNumber, infos.size()); @@ -680,26 +581,20 @@ 
TEST(TestAllocateChunkPolicy, } TEST(TestAllocateChunkPolicy, - TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { + TestAllocateChunkRoundRobinInSingleLogicalPoolEmpty) { std::vector copySetIds; std::map copySetMap; uint32_t nextIndex = 15; int chunkNumber = 10; std::vector infos; - bool ret = - AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( - copySetIds, - 1, - &nextIndex, - chunkNumber, - &infos); + bool ret = AllocateChunkPolicy::AllocateChunkRoundRobinInSingleLogicalPool( + copySetIds, 1, &nextIndex, chunkNumber, &infos); ASSERT_FALSE(ret); ASSERT_EQ(15, nextIndex); ASSERT_EQ(0, infos.size()); } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc) { std::map poolWeightMap; std::map poolMap; for (int i = 0; i < 5; i++) { @@ -709,8 +604,8 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolWeightMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolWeightMap, + &pid); poolMap[pid]++; } @@ -719,7 +614,8 @@ TEST(TestAllocateChunkPolicy, ASSERT_TRUE(poolMap[1] < poolMap[2]); ASSERT_TRUE(poolMap[2] < poolMap[3]); ASSERT_TRUE(poolMap[3] < poolMap[4]); - // 5个池大概分布因该是0, 10000,20000,30000,40000 + // The approximate distribution of 5 pools should be + // 0, 10000, 20000, 30000, 40000 LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -727,8 +623,7 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolByWeightPoc2) { +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolByWeightPoc2) { std::map poolMap; poolMap[0] = 100000; poolMap[1] = 90000; @@ -738,12 +633,11 @@ TEST(TestAllocateChunkPolicy, for (int i = 0; i < 100000; i++) { PoolIdType pid; - 
AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight( - poolMap, &pid); + AllocateChunkPolicy::ChooseSingleLogicalPoolByWeight(poolMap, &pid); poolMap[pid] -= 1; } - // 测试是否能逐渐拉平pool之间差距 + // Test to see if it is possible to gradually equalize the gap between pools LOG(INFO) << "pool0 : " << poolMap[0] << std::endl << "pool1 : " << poolMap[1] << std::endl << "pool2 : " << poolMap[2] << std::endl @@ -751,9 +645,8 @@ TEST(TestAllocateChunkPolicy, << "pool4 : " << poolMap[4] << std::endl; } -// 测试能否随机到每个pool -TEST(TestAllocateChunkPolicy, - TestChooseSingleLogicalPoolRandom) { +// Test to see if random allocation to each pool is possible +TEST(TestAllocateChunkPolicy, TestChooseSingleLogicalPoolRandom) { std::vector pools = {1, 2, 3, 4, 5}; std::map allocMap; allocMap[1] = 0; diff --git a/test/mds/topology/test_topology_metric.cpp b/test/mds/topology/test_topology_metric.cpp index 2a38263784..fd1112a4ec 100644 --- a/test/mds/topology/test_topology_metric.cpp +++ b/test/mds/topology/test_topology_metric.cpp @@ -21,24 +21,24 @@ */ #include -#include #include +#include #include "src/mds/topology/topology_metric.h" -#include "test/mds/topology/mock_topology.h" #include "test/mds/mock/mock_alloc_statistic.h" #include "test/mds/mock/mock_topology.h" +#include "test/mds/topology/mock_topology.h" namespace curve { namespace mds { namespace topology { -using ::testing::Return; using ::testing::_; using ::testing::AnyOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; using ::testing::DoAll; +using ::testing::Invoke; +using ::testing::Return; +using ::testing::SetArgPointee; class TestTopologyMetric : public ::testing::Test { public: @@ -48,10 +48,9 @@ class TestTopologyMetric : public ::testing::Test { idGenerator_ = std::make_shared(); tokenGenerator_ = std::make_shared(); storage_ = std::make_shared(); - // 使用真实的topology + // Using real topology topology_ = std::make_shared(idGenerator_, - tokenGenerator_, - storage_); + tokenGenerator_, storage_); topologyStat_ = 
std::make_shared(); allocStatistic_ = std::make_shared(); @@ -76,122 +75,87 @@ class TestTopologyMetric : public ::testing::Test { const std::string& type = "SSD", const std::string& desc = "descPoolset") { Poolset poolset(pid, name, type, desc); - EXPECT_CALL(*storage_, StoragePoolset(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StoragePoolset(_)).WillOnce(Return(true)); int ret = topology_->AddPoolset(poolset); ASSERT_EQ(kTopoErrCodeSuccess, ret); } - void PrepareAddLogicalPool(PoolIdType id = 0x01, - const std::string &name = "testLogicalPool", - PoolIdType phyPoolId = 0x11, - LogicalPoolType type = PAGEFILE, - const LogicalPool::RedundanceAndPlaceMentPolicy &rap = - LogicalPool::RedundanceAndPlaceMentPolicy(), - const LogicalPool::UserPolicy &policy = LogicalPool::UserPolicy(), - uint64_t createTime = 0x888 - ) { - LogicalPool pool(id, - name, - phyPoolId, - type, - rap, - policy, - createTime, - true, - true); - - EXPECT_CALL(*storage_, StorageLogicalPool(_)) - .WillOnce(Return(true)); + void PrepareAddLogicalPool( + PoolIdType id = 0x01, const std::string& name = "testLogicalPool", + PoolIdType phyPoolId = 0x11, LogicalPoolType type = PAGEFILE, + const LogicalPool::RedundanceAndPlaceMentPolicy& rap = + LogicalPool::RedundanceAndPlaceMentPolicy(), + const LogicalPool::UserPolicy& policy = LogicalPool::UserPolicy(), + uint64_t createTime = 0x888) { + LogicalPool pool(id, name, phyPoolId, type, rap, policy, createTime, + true, true); + + EXPECT_CALL(*storage_, StorageLogicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddLogicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddPhysicalPool()"; } - void PrepareAddPhysicalPool(PoolIdType id = 0x11, - const std::string &name = "testPhysicalPool", - PoolsetIdType pid = 0x61, - const std::string &desc = "descPhysicalPool") { - PhysicalPool pool(id, - name, - pid, - desc); - EXPECT_CALL(*storage_, StoragePhysicalPool(_)) - .WillOnce(Return(true)); + const std::string& name = 
"testPhysicalPool", + PoolsetIdType pid = 0x61, + const std::string& desc = "descPhysicalPool") { + PhysicalPool pool(id, name, pid, desc); + EXPECT_CALL(*storage_, StoragePhysicalPool(_)).WillOnce(Return(true)); int ret = topology_->AddPhysicalPool(pool); ASSERT_EQ(kTopoErrCodeSuccess, ret); } void PrepareAddZone(ZoneIdType id = 0x21, - const std::string &name = "testZone", - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descZone") { + const std::string& name = "testZone", + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descZone") { Zone zone(id, name, physicalPoolId, desc); - EXPECT_CALL(*storage_, StorageZone(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageZone(_)).WillOnce(Return(true)); int ret = topology_->AddZone(zone); - ASSERT_EQ(kTopoErrCodeSuccess, ret) << - "should have PrepareAddPhysicalPool()"; + ASSERT_EQ(kTopoErrCodeSuccess, ret) + << "should have PrepareAddPhysicalPool()"; } void PrepareAddServer(ServerIdType id = 0x31, - const std::string &hostName = "testServer", - const std::string &internalHostIp = "testInternalIp", - const std::string &externalHostIp = "testExternalIp", - ZoneIdType zoneId = 0x21, - PoolIdType physicalPoolId = 0x11, - const std::string &desc = "descServer") { - Server server(id, - hostName, - internalHostIp, - 0, - externalHostIp, - 0, - zoneId, - physicalPoolId, - desc); - EXPECT_CALL(*storage_, StorageServer(_)) - .WillOnce(Return(true)); + const std::string& hostName = "testServer", + const std::string& internalHostIp = "testInternalIp", + const std::string& externalHostIp = "testExternalIp", + ZoneIdType zoneId = 0x21, + PoolIdType physicalPoolId = 0x11, + const std::string& desc = "descServer") { + Server server(id, hostName, internalHostIp, 0, externalHostIp, 0, + zoneId, physicalPoolId, desc); + EXPECT_CALL(*storage_, StorageServer(_)).WillOnce(Return(true)); int ret = topology_->AddServer(server); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddZone()"; } 
void PrepareAddChunkServer(ChunkServerIdType id = 0x41, - const std::string &token = "testToken", - const std::string &diskType = "nvme", - ServerIdType serverId = 0x31, - const std::string &hostIp = "testInternalIp", - uint32_t port = 0, - const std::string &diskPath = "/") { - ChunkServer cs(id, - token, - diskType, - serverId, - hostIp, - port, - diskPath); - ChunkServerState st; - st.SetDiskCapacity(100 * 1024); - st.SetDiskUsed(10 * 1024); - cs.SetChunkServerState(st); - EXPECT_CALL(*storage_, StorageChunkServer(_)) - .WillOnce(Return(true)); + const std::string& token = "testToken", + const std::string& diskType = "nvme", + ServerIdType serverId = 0x31, + const std::string& hostIp = "testInternalIp", + uint32_t port = 0, + const std::string& diskPath = "/") { + ChunkServer cs(id, token, diskType, serverId, hostIp, port, diskPath); + ChunkServerState st; + st.SetDiskCapacity(100 * 1024); + st.SetDiskUsed(10 * 1024); + cs.SetChunkServerState(st); + EXPECT_CALL(*storage_, StorageChunkServer(_)).WillOnce(Return(true)); int ret = topology_->AddChunkServer(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddServer()"; } - void PrepareAddCopySet(CopySetIdType copysetId, - PoolIdType logicalPoolId, - const std::set &members) { - CopySetInfo cs(logicalPoolId, - copysetId); + void PrepareAddCopySet(CopySetIdType copysetId, PoolIdType logicalPoolId, + const std::set& members) { + CopySetInfo cs(logicalPoolId, copysetId); cs.SetCopySetMembers(members); - EXPECT_CALL(*storage_, StorageCopySet(_)) - .WillOnce(Return(true)); + EXPECT_CALL(*storage_, StorageCopySet(_)).WillOnce(Return(true)); int ret = topology_->AddCopySet(cs); ASSERT_EQ(kTopoErrCodeSuccess, ret) << "should have PrepareAddLogicalPool()"; @@ -207,7 +171,7 @@ class TestTopologyMetric : public ::testing::Test { std::shared_ptr testObj_; }; -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { PoolsetIdType 
poolsetId = 0x61; PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -229,14 +193,13 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { rap.pageFileRAP.replicaNum = 3; PrepareAddLogicalPool(logicalPoolId, "logicalPool1", physicalPoolId, - PAGEFILE, rap); + PAGEFILE, rap); std::set replicas; replicas.insert(0x41); replicas.insert(0x42); replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -258,12 +221,10 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); EXPECT_CALL(*allocStatistic_, GetAllocByLogicalPool(_, _)) - .WillOnce(DoAll(SetArgPointee<1>(20 * 1024), - Return(true))); + .WillOnce(DoAll(SetArgPointee<1>(20 * 1024), Return(true))); testObj_->UpdateTopologyMetrics(); @@ -283,9 +244,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x41]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x41]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x42]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x42]->copysetNum.get_value()); @@ -301,9 +262,9 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x42]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - 
gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x42]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(2, gChunkServerMetrics[0x43]->scatterWidth.get_value()); ASSERT_EQ(1, gChunkServerMetrics[0x43]->copysetNum.get_value()); @@ -319,43 +280,75 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeUsedBytes.get_value()); ASSERT_EQ(1024, gChunkServerMetrics[0x43]->chunkSizeLeftBytes.get_value()); ASSERT_EQ(1024, - gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTrashedBytes.get_value()); ASSERT_EQ(1024 * 3, - gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); + gChunkServerMetrics[0x43]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(1, gLogicalPoolMetrics.size()); - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); //NOLINT - ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId]->chunkServerNum.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->scatterWidthRange.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMin.get_value()); //NOLINT - ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId]->scatterWidthMax.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, 
gLogicalPoolMetrics[logicalPoolId]->copysetNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->copysetNumRange.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMin.get_value()); //NOLINT - ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId]->copysetNumMax.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumAvg.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumVariance.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumStandardDeviation.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumRange.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMin.get_value()); //NOLINT - ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId]->leaderNumMax.get_value()); //NOLINT - ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskCapacity.get_value()); //NOLINT - ASSERT_EQ(20 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); //NOLINT - ASSERT_EQ(10 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); //NOLINT - - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 3, + gLogicalPoolMetrics[logicalPoolId]->serverNum.get_value()); // NOLINT + ASSERT_EQ(3, gLogicalPoolMetrics[logicalPoolId] + ->chunkServerNum.get_value()); // NOLINT + ASSERT_EQ( + 1, + gLogicalPoolMetrics[logicalPoolId]->copysetNum.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthRange.get_value()); // NOLINT + ASSERT_EQ(2, gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMin.get_value()); // NOLINT + ASSERT_EQ(2, 
gLogicalPoolMetrics[logicalPoolId] + ->scatterWidthMax.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumRange.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMin.get_value()); // NOLINT + ASSERT_EQ(1, gLogicalPoolMetrics[logicalPoolId] + ->copysetNumMax.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumAvg.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumVariance.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumStandardDeviation.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumRange.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMin.get_value()); // NOLINT + ASSERT_EQ(0, gLogicalPoolMetrics[logicalPoolId] + ->leaderNumMax.get_value()); // NOLINT + ASSERT_EQ(100 * 1024 * 3, gLogicalPoolMetrics[logicalPoolId] + ->diskCapacity.get_value()); // NOLINT + ASSERT_EQ( + 20 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskAlloc.get_value()); // NOLINT + ASSERT_EQ( + 10 * 1024 * 3, + gLogicalPoolMetrics[logicalPoolId]->diskUsed.get_value()); // NOLINT + + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeUsedBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeLeftBytes.get_value()); - ASSERT_EQ(1024 * 3, + ASSERT_EQ( + 1024 * 3, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTrashedBytes.get_value()); - ASSERT_EQ(1024 * 9, + ASSERT_EQ( + 1024 * 9, gLogicalPoolMetrics[logicalPoolId]->chunkSizeTotalBytes.get_value()); ASSERT_EQ(3, 
gLogicalPoolMetrics[logicalPoolId]->readIOPS.get_value()); @@ -372,7 +365,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsOneLogicalPool) { ASSERT_EQ(1, gClusterMetrics->copysetNum.get_value()); } -TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { +TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { PrepareAddPoolset(); PoolIdType logicalPoolId = 0x01; PoolIdType physicalPoolId = 0x11; @@ -396,7 +389,6 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { replicas.insert(0x43); PrepareAddCopySet(copysetId, logicalPoolId, replicas); - ChunkServerStat stat1; CopysetStat cstat1; stat1.leaderCount = 1; @@ -414,8 +406,7 @@ TEST_F(TestTopologyMetric, TestUpdateTopologyMetricsCleanRetired) { stat1.copysetStats.push_back(cstat1); EXPECT_CALL(*topologyStat_, GetChunkServerStat(_, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), - Return(true))); + .WillRepeatedly(DoAll(SetArgPointee<1>(stat1), Return(true))); testObj_->UpdateTopologyMetrics(); diff --git a/test/snapshotcloneserver/test_curvefs_client.cpp b/test/snapshotcloneserver/test_curvefs_client.cpp index b4e79b17b0..fe3821011d 100644 --- a/test/snapshotcloneserver/test_curvefs_client.cpp +++ b/test/snapshotcloneserver/test_curvefs_client.cpp @@ -20,9 +20,8 @@ * Author: xuchaojie */ - -#include #include +#include #include "src/snapshotcloneserver/common/curvefs_client.h" #include "test/util/config_generator.h" @@ -40,19 +39,14 @@ class TestCurveFsClientImpl : public ::testing::Test { static void SetUpTestCase() { ClientConfigGenerator gentor(kClientConfigPath); - // 把超时时间和重试次数改小,已使得测试尽快完成 + // Reducing the timeout and retry times has enabled the testing to + // complete as soon as possible std::vector options = { - {"mds.listen.addr=127.0.0.1:8888", - "mds.registerToMDS=false", - "mds.rpcTimeoutMS=1", - "mds.maxRPCTimeoutMS=1", - "mds.maxRetryMS=1", - "mds.rpcRetryIntervalUS=1", - "metacache.getLeaderTimeOutMS=1", - "metacache.getLeaderRetry=1", - 
"metacache.rpcRetryIntervalUS=1", - "chunkserver.opRetryIntervalUS=1", - "chunkserver.opMaxRetry=1", + {"mds.listen.addr=127.0.0.1:8888", "mds.registerToMDS=false", + "mds.rpcTimeoutMS=1", "mds.maxRPCTimeoutMS=1", "mds.maxRetryMS=1", + "mds.rpcRetryIntervalUS=1", "metacache.getLeaderTimeOutMS=1", + "metacache.getLeaderRetry=1", "metacache.rpcRetryIntervalUS=1", + "chunkserver.opRetryIntervalUS=1", "chunkserver.opMaxRetry=1", "chunkserver.rpcTimeoutMS=1", "chunkserver.maxRetrySleepIntervalUS=1", "chunkserver.maxRPCTimeoutMS=1"}, @@ -64,8 +58,7 @@ class TestCurveFsClientImpl : public ::testing::Test { virtual void SetUp() { std::shared_ptr snapClient = std::make_shared(); - std::shared_ptr fileClient = - std::make_shared(); + std::shared_ptr fileClient = std::make_shared(); client_ = std::make_shared(snapClient, fileClient); clientOption_.configPath = kClientConfigPath; clientOption_.mdsRootUser = "root"; @@ -75,9 +68,7 @@ class TestCurveFsClientImpl : public ::testing::Test { client_->Init(clientOption_); } - virtual void TearDown() { - client_->UnInit(); - } + virtual void TearDown() { client_->UnInit(); } protected: std::shared_ptr client_; @@ -85,9 +76,7 @@ class TestCurveFsClientImpl : public ::testing::Test { }; struct TestClosure : public SnapCloneClosure { - void Run() { - std::unique_ptr selfGuard(this); - } + void Run() { std::unique_ptr selfGuard(this); } }; TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { @@ -111,35 +100,35 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { SegmentInfo segInfo; ret = client_->GetSnapshotSegmentInfo("file1", "user1", 1, 0, &segInfo); ASSERT_LT(ret, 0); - ret = client_->GetSnapshotSegmentInfo( - "file1", clientOption_.mdsRootUser, 1, 0, &segInfo); + ret = client_->GetSnapshotSegmentInfo("file1", clientOption_.mdsRootUser, 1, + 0, &segInfo); ASSERT_LT(ret, 0); ChunkIDInfo cidinfo; FileStatus fstatus; ret = client_->CheckSnapShotStatus("file1", "user1", 1, &fstatus); ASSERT_LT(ret, 0); - ret = 
client_->CheckSnapShotStatus( - "file1", clientOption_.mdsRootUser, 1, &fstatus); + ret = client_->CheckSnapShotStatus("file1", clientOption_.mdsRootUser, 1, + &fstatus); ASSERT_LT(ret, 0); ChunkInfoDetail chunkInfo; ret = client_->GetChunkInfo(cidinfo, &chunkInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", "user1", 1024, 1, 1024, 0, 0, "default", &fInfo); + ret = client_->CreateCloneFile("source1", "file1", "user1", 1024, 1, 1024, + 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - ret = client_->CreateCloneFile( - "source1", "file1", clientOption_.mdsRootUser, 1024, 1, 1024, - 0, 0, "default", &fInfo); + ret = + client_->CreateCloneFile("source1", "file1", clientOption_.mdsRootUser, + 1024, 1, 1024, 0, 0, "default", &fInfo); ASSERT_LT(ret, 0); - TestClosure *cb = new TestClosure(); + TestClosure* cb = new TestClosure(); ret = client_->CreateCloneChunk("", cidinfo, 1, 2, 1024, cb); ASSERT_EQ(ret, 0); - TestClosure *cb2 = new TestClosure(); + TestClosure* cb2 = new TestClosure(); ret = client_->RecoverChunk(cidinfo, 0, 1024, cb2); ASSERT_EQ(ret, 0); @@ -159,7 +148,10 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->GetFileInfo("file1", clientOption_.mdsRootUser, &fInfo); ASSERT_LT(ret, 0); - // client 对mds接口无限重试,这两个接口死循环,先注释掉 + // // The client is indefinitely retrying the MDS interface, causing + // these two interfaces to enter into an infinite loop. Commenting + // them out for now. 
+ // ret = client_->GetOrAllocateSegmentInfo( // true, 0, &fInfo, "user1", &segInfo); // ASSERT_LT(ret, 0); @@ -169,8 +161,8 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ret = client_->RenameCloneFile("user1", 1, 2, "file1", "file2"); ASSERT_LT(ret, 0); - ret = client_->RenameCloneFile( - clientOption_.mdsRootUser, 1, 2, "file1", "file2"); + ret = client_->RenameCloneFile(clientOption_.mdsRootUser, 1, 2, "file1", + "file2"); ASSERT_LT(ret, 0); ret = client_->DeleteFile("file1", "user1", 1); @@ -187,7 +179,5 @@ TEST_F(TestCurveFsClientImpl, TestClientInterfaceFail) { ASSERT_LT(ret, 0); } - - } // namespace snapshotcloneserver } // namespace curve diff --git a/test/snapshotcloneserver/test_snapshot_service_manager.cpp b/test/snapshotcloneserver/test_snapshot_service_manager.cpp index ba51d90f98..0af03c9315 100644 --- a/test/snapshotcloneserver/test_snapshot_service_manager.cpp +++ b/test/snapshotcloneserver/test_snapshot_service_manager.cpp @@ -20,25 +20,24 @@ * Author: xuchaojie */ -#include #include +#include -#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" -#include "src/common/snapshotclone/snapshotclone_define.h" - -#include "test/snapshotcloneserver/mock_snapshot_server.h" #include "src/common/concurrent/count_down_event.h" +#include "src/common/snapshotclone/snapshotclone_define.h" #include "src/snapshotcloneserver/common/snapshotclone_metric.h" +#include "src/snapshotcloneserver/snapshot/snapshot_service_manager.h" +#include "test/snapshotcloneserver/mock_snapshot_server.h" using curve::common::CountDownEvent; -using ::testing::Return; using ::testing::_; -using ::testing::AnyOf; using ::testing::AllOf; -using ::testing::SetArgPointee; -using ::testing::Invoke; +using ::testing::AnyOf; using ::testing::DoAll; +using ::testing::Invoke; using ::testing::Property; +using ::testing::Return; +using ::testing::SetArgPointee; namespace curve { namespace snapshotcloneserver { @@ -51,21 +50,16 @@ class TestSnapshotServiceManager : 
public ::testing::Test { virtual void SetUp() { serverOption_.snapshotPoolThreadNum = 8; serverOption_.snapshotTaskManagerScanIntervalMs = 100; - core_ = - std::make_shared(); - auto metaStore_ = - std::shared_ptr(); + core_ = std::make_shared(); + auto metaStore_ = std::shared_ptr(); snapshotMetric_ = std::make_shared(metaStore_); - std::shared_ptr - taskMgr_ = + std::shared_ptr taskMgr_ = std::make_shared(core_, snapshotMetric_); manager_ = std::make_shared(taskMgr_, core_); - ASSERT_EQ(0, manager_->Init(serverOption_)) - << "manager init fail."; - ASSERT_EQ(0, manager_->Start()) - << "manager start fail."; + ASSERT_EQ(0, manager_->Init(serverOption_)) << "manager init fail."; + ASSERT_EQ(0, manager_->Start()) << "manager start fail."; } virtual void TearDown() { @@ -75,31 +69,22 @@ class TestSnapshotServiceManager : public ::testing::Test { snapshotMetric_ = nullptr; } - void PrepareCreateSnapshot( - const std::string &file, - const std::string &user, - const std::string &desc, - UUID uuid) { + void PrepareCreateSnapshot(const std::string& file, const std::string& user, + const std::string& desc, UUID uuid) { SnapshotInfo info(uuid, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); @@ -112,8 +97,7 @@ class TestSnapshotServiceManager : public 
::testing::Test { SnapshotCloneServerOptions serverOption_; }; -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -122,32 +106,25 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -155,8 +132,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPreFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -165,21 +141,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - 
.WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeInternalError))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotSuccessByTaskExist) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotSuccessByTaskExist) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -188,20 +156,13 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeTaskExist))); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeTaskExist))); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotPushTaskFail) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotPushTaskFail) { const std::string file1 = "file1"; const std::string user1 = "user1"; const std::string desc1 = "snap1"; @@ -209,33 +170,21 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuid1, user1, file1, desc1); EXPECT_CALL(*core_, CreateSnapshotPre(file1, user1, desc1, _)) - .WillRepeatedly(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillRepeatedly(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([] (std::shared_ptr task) { - })); + .WillOnce(Invoke([](std::shared_ptr task) {})); UUID uuid; - int ret = manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid); + int ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); UUID uuid2; - ret = 
manager_->CreateSnapshot( - file1, - user1, - desc1, - &uuid2); + ret = manager_->CreateSnapshot(file1, user1, desc1, &uuid2); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -243,8 +192,7 @@ TEST_F(TestSnapshotServiceManager, ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSuccess) { +TEST_F(TestSnapshotServiceManager, TestCreateSnapshotMultiThreadSuccess) { const std::string file1 = "file1"; const std::string file2 = "file2"; const std::string file3 = "file3"; @@ -264,15 +212,9 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); std::condition_variable cv; std::mutex m; @@ -281,43 +223,28 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cv, &m, &count] ( - std::shared_ptr task) { - task->GetSnapshotInfo().SetStatus(Status::done); - std::unique_lock lk(m); - count++; - task->Finish(); - cv.notify_all(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cv, &m, &count](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + 
std::unique_lock lk(m); + count++; + task->Finish(); + cv.notify_all(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file2, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file2, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file3, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file3, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - cv.wait(lk, [&count](){return count == 3;}); + cv.wait(lk, [&count]() { return count == 3; }); - - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); ASSERT_EQ(3, snapshotMetric_->snapshotSucceed.get_value()); @@ -325,7 +252,7 @@ TEST_F(TestSnapshotServiceManager, } TEST_F(TestSnapshotServiceManager, - TestCreateSnapshotMultiThreadSameFileSuccess) { + TestCreateSnapshotMultiThreadSameFileSuccess) { const std::string file1 = "file1"; const std::string user = "user1"; const std::string desc1 = "snap1"; @@ -343,52 +270,32 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, CreateSnapshotPre(_, _, _, _)) .Times(3) - .WillOnce(DoAll( - SetArgPointee<3>(info1), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info3), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info1), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info3), Return(kErrCodeSuccess))); CountDownEvent cond1(3); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) .Times(3) - .WillRepeatedly(Invoke([&cond1] ( - std::shared_ptr 
task) { - task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); - - - int ret = manager_->CreateSnapshot( - file1, - user, - desc1, - &uuid); + .WillRepeatedly( + Invoke([&cond1](std::shared_ptr task) { + task->GetSnapshotInfo().SetStatus(Status::done); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file1, user, desc1, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc2, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc2, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CreateSnapshot( - file1, - user, - desc3, - &uuid); + ret = manager_->CreateSnapshot(file1, user, desc3, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -408,19 +315,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -428,7 
+334,7 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } -// 删除转cancel用例 +// Delete to cancel use case TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -438,30 +344,23 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] ( - std::shared_ptr task) { - LOG(INFO) << "in HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - break; - } - } - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce(Invoke([&cond1](std::shared_ptr task) { + LOG(INFO) << "in HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + break; + } + } + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); @@ -496,19 +395,18 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + 
std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -516,8 +414,6 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotByCancelByDeleteSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - - TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPreFail) { const std::string file = "file1"; const std::string user = "user1"; @@ -543,10 +439,10 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { .WillRepeatedly(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - cond1.Signal(); - })); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -555,9 +451,8 @@ TEST_F(TestSnapshotServiceManager, TestDeleteSnapshotPushTaskFail) { ret = manager_->DeleteSnapshot(uuid, user, file); ASSERT_EQ(kErrCodeInternalError, ret); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(1, snapshotMetric_->snapshotDoing.get_value()); @@ -579,19 +474,18 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { .WillOnce(Return(kErrCodeSuccess)); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->DeleteSnapshot(uuid, user, 
file); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -599,7 +493,6 @@ TEST_F(TestSnapshotServiceManager, TestCreateAndDeleteSnapshotSuccess) { ASSERT_EQ(0, snapshotMetric_->snapshotFailed.get_value()); } - TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { const std::string file = "file1"; const std::string user = "user1"; @@ -610,29 +503,22 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -659,8 +545,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; ret = 
manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -688,8 +573,7 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -702,8 +586,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillOnce(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeInternalError))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -722,8 +606,8 @@ TEST_F(TestSnapshotServiceManager, TestGetFileSnapshotInfoFail2) { snapInfo.push_back(snap1); EXPECT_CALL(*core_, GetFileSnapshotInfo(file, _)) - .WillRepeatedly(DoAll(SetArgPointee<1>(snapInfo), - Return(kErrCodeSuccess))); + .WillRepeatedly( + DoAll(SetArgPointee<1>(snapInfo), Return(kErrCodeSuccess))); std::vector fileSnapInfo; int ret = manager_->GetFileSnapshotInfo(file, user, &fileSnapInfo); @@ -740,29 +624,22 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke( - [&cond1, progress] (std::shared_ptr task) { - task->SetProgress(progress); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, progress](std::shared_ptr task) { + task->SetProgress(progress); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); 
ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - const std::string file2 = "file2"; const std::string desc2 = "snap2"; UUID uuid2 = "uuid2"; @@ -789,8 +666,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { snapInfo.push_back(snap4); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // empty filter SnapshotFilterCondition filter; @@ -826,14 +702,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter uuid SnapshotFilterCondition filter2; @@ -852,14 +726,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::pending, s.GetStatus()); ASSERT_EQ(progress, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by filename SnapshotFilterCondition filter3; @@ -890,14 +762,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::error, s.GetStatus()); ASSERT_EQ(0, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - 
Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by status SnapshotFilterCondition filter4; @@ -923,14 +793,12 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeSuccess))); // filter by user SnapshotFilterCondition filter5; @@ -949,8 +817,7 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterSuccess) { ASSERT_EQ(Status::done, s.GetStatus()); ASSERT_EQ(100, v.GetSnapProgress()); } else { - FAIL() << "should not exist this uuid = " - << s.GetUuid(); + FAIL() << "should not exist this uuid = " << s.GetUuid(); } } } @@ -964,8 +831,8 @@ TEST_F(TestSnapshotServiceManager, TestGetSnapshotListByFilterFail) { std::vector snapInfo; EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(snapInfo), - Return(kErrCodeInternalError))); + .WillOnce( + DoAll(SetArgPointee<0>(snapInfo), Return(kErrCodeInternalError))); SnapshotFilterCondition filter; std::vector fileSnapInfo; @@ -993,32 +860,30 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskSuccess) { list.push_back(snap2); list.push_back(snap3); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(list), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeSuccess))); CountDownEvent cond1(2); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + 
cond1.Signal(); + })); EXPECT_CALL(*core_, HandleDeleteSnapshotTask(_)) - .WillOnce(Invoke([&cond1] (std::shared_ptr task) { + .WillOnce(Invoke([&cond1](std::shared_ptr task) { task->GetSnapshotInfo().SetStatus(Status::done); - task->Finish(); - cond1.Signal(); - })); + task->Finish(); + cond1.Signal(); + })); int ret = manager_->RecoverSnapshotTask(); ASSERT_EQ(kErrCodeSuccess, ret); cond1.Wait(); - std::this_thread::sleep_for( - std::chrono::milliseconds( - serverOption_.snapshotTaskManagerScanIntervalMs * 2)); + std::this_thread::sleep_for(std::chrono::milliseconds( + serverOption_.snapshotTaskManagerScanIntervalMs * 2)); ASSERT_EQ(0, snapshotMetric_->snapshotWaiting.get_value()); ASSERT_EQ(0, snapshotMetric_->snapshotDoing.get_value()); @@ -1041,15 +906,13 @@ TEST_F(TestSnapshotServiceManager, TestRecoverSnapshotTaskFail) { list.push_back(snap1); list.push_back(snap2); EXPECT_CALL(*core_, GetSnapshotList(_)) - .WillOnce(DoAll(SetArgPointee<0>(list), - Return(kErrCodeInternalError))); + .WillOnce(DoAll(SetArgPointee<0>(list), Return(kErrCodeInternalError))); int ret = manager_->RecoverSnapshotTask(); ASSERT_EQ(kErrCodeInternalError, ret); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotSuccess) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotSuccess) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1062,31 +925,27 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info2(uuidOut2, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) .Times(2) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))) - .WillOnce(DoAll( - SetArgPointee<3>(info2), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))) + .WillOnce(DoAll(SetArgPointee<3>(info2), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - 
std::shared_ptr task) { - LOG(INFO) << "in mock HandleCreateSnapshotTask"; - while (1) { - if (task->IsCanceled()) { - cond1.Signal(); - break; - } - } - task->Finish(); - cond2.Signal(); - })); - - // 取消排队的快照会调一次 + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + LOG(INFO) << "in mock HandleCreateSnapshotTask"; + while (1) { + if (task->IsCanceled()) { + cond1.Signal(); + break; + } + } + task->Finish(); + cond2.Signal(); + })); + + // Unqueued snapshots will be called once EXPECT_CALL(*core_, HandleCancelUnSchduledSnapshotTask(_)) .WillOnce(Return(kErrCodeSuccess)); @@ -1099,32 +958,20 @@ TEST_F(TestSnapshotServiceManager, EXPECT_CALL(*core_, HandleCancelScheduledSnapshotTask(_)) .WillOnce(Invoke(callback)); - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); - // 再打一个快照,覆盖排队的情况 - ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid2); + // Take another snapshot to cover the queuing situation + ret = manager_->CreateSnapshot(file, user, desc, &uuid2); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid2, uuidOut2); - // 先取消在排队的快照 - ret = manager_->CancelSnapshot(uuidOut2, - user, - file); + // Cancel queued snapshots first + ret = manager_->CancelSnapshot(uuidOut2, user, file); ASSERT_EQ(kErrCodeSuccess, ret); - ret = manager_->CancelSnapshot(uuidOut, - user, - file); + ret = manager_->CancelSnapshot(uuidOut, user, file); ASSERT_EQ(kErrCodeSuccess, ret); @@ -1132,8 +979,7 @@ TEST_F(TestSnapshotServiceManager, cond2.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffUser) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffUser) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1142,41 +988,32 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, 
CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string user2 = "user2"; - ret = manager_->CancelSnapshot(uuidOut, - user2, - file); + ret = manager_->CancelSnapshot(uuidOut, user2, file); cond2.Signal(); ASSERT_EQ(kErrCodeInvalidUser, ret); cond1.Wait(); } -TEST_F(TestSnapshotServiceManager, - TestCancelSnapshotFailDiffFile) { +TEST_F(TestSnapshotServiceManager, TestCancelSnapshotFailDiffFile) { const std::string file = "file1"; const std::string user = "user1"; const std::string desc = "snap1"; @@ -1185,40 +1022,30 @@ TEST_F(TestSnapshotServiceManager, SnapshotInfo info(uuidOut, user, file, desc); EXPECT_CALL(*core_, CreateSnapshotPre(file, user, desc, _)) - .WillOnce(DoAll( - SetArgPointee<3>(info), - Return(kErrCodeSuccess))); + .WillOnce(DoAll(SetArgPointee<3>(info), Return(kErrCodeSuccess))); CountDownEvent cond1(1); CountDownEvent cond2(1); EXPECT_CALL(*core_, HandleCreateSnapshotTask(_)) - .WillOnce(Invoke([&cond1, &cond2] ( - std::shared_ptr task) { - cond2.Wait(); - task->Finish(); - cond1.Signal(); - })); - - int ret = manager_->CreateSnapshot( - file, - user, - desc, - &uuid); + .WillOnce( + Invoke([&cond1, &cond2](std::shared_ptr task) { + cond2.Wait(); + task->Finish(); + cond1.Signal(); + })); + + int ret = manager_->CreateSnapshot(file, user, desc, &uuid); 
ASSERT_EQ(kErrCodeSuccess, ret); ASSERT_EQ(uuid, uuidOut); std::string file2 = "file2"; - ret = manager_->CancelSnapshot(uuidOut, - user, - file2); + ret = manager_->CancelSnapshot(uuidOut, user, file2); cond2.Signal(); ASSERT_EQ(kErrCodeFileNameNotMatch, ret); cond1.Wait(); } - } // namespace snapshotcloneserver } // namespace curve - diff --git a/test/tools/version_tool_test.cpp b/test/tools/version_tool_test.cpp index 64581f73ac..db40892f40 100644 --- a/test/tools/version_tool_test.cpp +++ b/test/tools/version_tool_test.cpp @@ -21,21 +21,23 @@ * Copyright (c) 2018 netease */ -#include #include "src/tools/version_tool.h" + +#include + #include "test/tools/mock/mock_mds_client.h" #include "test/tools/mock/mock_metric_client.h" #include "test/tools/mock/mock_snapshot_clone_client.h" +using curve::mds::topology::ChunkServerStatus; +using curve::mds::topology::DiskState; +using curve::mds::topology::OnlineState; using ::testing::_; +using ::testing::An; +using ::testing::DoAll; using ::testing::Return; using ::testing::ReturnRef; -using ::testing::DoAll; using ::testing::SetArgPointee; -using ::testing::An; -using curve::mds::topology::ChunkServerStatus; -using curve::mds::topology::DiskState; -using curve::mds::topology::OnlineState; namespace curve { namespace tool { @@ -53,8 +55,8 @@ class VersionToolTest : public ::testing::Test { metricClient_ = nullptr; } - void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo *csInfo, - uint64_t csId) { + void GetCsInfoForTest(curve::mds::topology::ChunkServerInfo* csInfo, + uint64_t csId) { csInfo->set_chunkserverid(csId); csInfo->set_disktype("ssd"); csInfo->set_hostip("127.0.0.1"); @@ -73,64 +75,61 @@ class VersionToolTest : public ::testing::Test { TEST_F(VersionToolTest, GetAndCheckMdsVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + 
std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分mds curve_version失败 + // 2. Obtain partial mds curve_version failed EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); ASSERT_EQ(0, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. 
version inconsistency EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); ASSERT_EQ(-1, versionTool.GetAndCheckMdsVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. Old version of mds EXPECT_CALL(*mdsClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); @@ -151,123 +150,112 @@ TEST_F(VersionToolTest, GetChunkServerVersion) { chunkservers.emplace_back(csInfo); } - // 1、正常情况 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 1. Normal situation + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、ListChunkServersInCluster失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 2. 
ListChunkServersInCluster failed + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) .WillOnce(Return(-1)); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); - // 3、获取metric失败 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 3. Failed to obtain metric + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); std::vector expectList = {"127.0.0.1:9191"}; ASSERT_EQ(expectList, failedList); - // 4、chunkserverList为空 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 4. chunkserverList is empty + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(std::vector()), - Return(0))); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillOnce( + DoAll(SetArgPointee<0>(std::vector()), Return(0))); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、version不一致 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 5. 
version inconsistency + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(-1, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ(-1, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 6、老版本 - EXPECT_CALL(*mdsClient_, ListChunkServersInCluster( - An*>())) + // 6. Old version + EXPECT_CALL(*mdsClient_, + ListChunkServersInCluster(An*>())) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(chunkservers), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(chunkservers), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(5) .WillRepeatedly(Return(MetricRet::kNotFound)); - ASSERT_EQ(0, versionTool.GetAndCheckChunkServerVersion(&version, - &failedList)); + ASSERT_EQ(0, + versionTool.GetAndCheckChunkServerVersion(&version, &failedList)); ASSERT_EQ("before-0.0.5.2", version); ASSERT_TRUE(failedList.empty()); } TEST_F(VersionToolTest, GetClientVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::vector clientAddrs = - {"127.0.0.1:8000", "127.0.0.1:8001", "127.0.0.1:8002", - "127.0.0.1:8003", "127.0.0.1:8004", "127.0.0.1:8005"}; + std::vector clientAddrs = {"127.0.0.1:8000", "127.0.0.1:8001", + "127.0.0.1:8002", "127.0.0.1:8003", + "127.0.0.1:8004", "127.0.0.1:8005"}; - // 1、正常情况 + // 1. 
Normal situation EXPECT_CALL(*mdsClient_, ListClient(_, _)) .Times(1) - .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), - Return(0))); + .WillOnce(DoAll(SetArgPointee<0>(clientAddrs), Return(0))); EXPECT_CALL(*metricClient_, GetMetric(_, kProcessCmdLineMetricName, _)) .Times(6) .WillOnce(Return(MetricRet::kOtherErr)) - .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessPython), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>(kProcessOther), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>(kProcessQemu), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessPython), Return(MetricRet::kOK))) + .WillOnce( + DoAll(SetArgPointee<2>(kProcessOther), Return(MetricRet::kOK))) .WillRepeatedly(DoAll(SetArgPointee<2>(kProcessNebdServer), - Return(MetricRet::kOK))); + Return(MetricRet::kOK))); EXPECT_CALL(*metricClient_, GetMetric(_, kCurveVersionMetricName, _)) .Times(5) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.3"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)) .WillOnce(Return(MetricRet::kNotFound)) - .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), - Return(MetricRet::kOK))); + .WillOnce(DoAll(SetArgPointee<2>("0.0.5.2"), Return(MetricRet::kOK))); ClientVersionMapType clientVersionMap; ClientVersionMapType expected; VersionMapType versionMap = {{"0.0.5.2", {"127.0.0.1:8004"}}, @@ -282,85 +270,80 @@ TEST_F(VersionToolTest, GetClientVersion) { ASSERT_EQ(0, versionTool.GetClientVersion(&clientVersionMap)); ASSERT_EQ(expected, clientVersionMap); - // 2、ListClient失败 - EXPECT_CALL(*mdsClient_, ListClient(_, _)) - .Times(1) - .WillOnce(Return(-1)); + // 2. 
ListClient failed + EXPECT_CALL(*mdsClient_, ListClient(_, _)).Times(1).WillOnce(Return(-1)); ASSERT_EQ(-1, versionTool.GetClientVersion(&clientVersionMap)); } TEST_F(VersionToolTest, GetAndCheckSnapshotCloneVersion) { VersionTool versionTool(mdsClient_, metricClient_, snapshotClient_); - std::map dummyServerMap = - {{"127.0.0.1:6666", "127.0.0.1:6667"}, - {"127.0.0.1:6668", "127.0.0.1:6669"}, - {"127.0.0.1:6670", "127.0.0.1:6671"}}; + std::map dummyServerMap = { + {"127.0.0.1:6666", "127.0.0.1:6667"}, + {"127.0.0.1:6668", "127.0.0.1:6669"}, + {"127.0.0.1:6670", "127.0.0.1:6671"}}; - // 1、正常情况 + // 1. Normal situation EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); std::string version; std::vector failedList; - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); ASSERT_TRUE(failedList.empty()); - // 2、获取部分curve_version失败 + // 2. Obtain partial curve_version failed EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillOnce(Return(MetricRet::kOtherErr)) - .WillRepeatedly(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))); - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + .WillRepeatedly( + DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("0.0.1", version); std::vector expectedList = {"127.0.0.1:6667"}; ASSERT_EQ(expectedList, failedList); - // 3、dummyServerMap为空 + // 3. 
dummyServerMap is empty std::map dummyServerMap2; EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap2)); - EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) - .Times(0); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + EXPECT_CALL(*metricClient_, GetMetric(_, _, _)).Times(0); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 4、version不一致 + // 4. version inconsistency EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) - .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), - Return(MetricRet::kOK))) - .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), - Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.2"), Return(MetricRet::kOK))) + .WillOnce(DoAll(SetArgPointee<2>("0.0.1"), Return(MetricRet::kOK))) .WillOnce(Return(MetricRet::kNotFound)); - ASSERT_EQ(-1, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + -1, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_TRUE(failedList.empty()); - // 5、老版本mds + // 5. Old version of mds EXPECT_CALL(*snapshotClient_, GetDummyServerMap()) .Times(1) .WillOnce(ReturnRef(dummyServerMap)); EXPECT_CALL(*metricClient_, GetMetric(_, _, _)) .Times(3) .WillRepeatedly(Return(MetricRet::kNotFound)); - ASSERT_EQ(0, versionTool.GetAndCheckSnapshotCloneVersion(&version, - &failedList)); + ASSERT_EQ( + 0, versionTool.GetAndCheckSnapshotCloneVersion(&version, &failedList)); ASSERT_EQ("before-0.0.5.2", version); ASSERT_TRUE(failedList.empty()); }