translate Chinese annotations to English annotations #2754

Open: wants to merge 11 commits into base: master
4 changes: 2 additions & 2 deletions WORKSPACE
@@ -96,8 +96,8 @@ bind(
actual = "@com_google_googletest//:gtest",
)

#Import the glog files.
# brpc内BUILD文件在依赖glog时, 直接指定的依赖是"@com_github_google_glog//:glog"
# Import the glog files.
# When the BUILD file in brpc depends on glog, the directly specified dependency is "@com_github_google_glog//:glog"
git_repository(
name = "com_github_google_glog",
remote = "https://github.com/google/glog",
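A quick way to confirm that the name brpc's BUILD files reference actually resolves is bazel query (a minimal sketch, assuming the git_repository rule above is present in the WORKSPACE; //src:server is a hypothetical target):

    # Resolves only if the external repository is defined with this exact name.
    bazel query '@com_github_google_glog//:glog'

    # Show a dependency path from one of your targets to glog.
    bazel query 'somepath(//src:server, @com_github_google_glog//:glog)'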
10 changes: 5 additions & 5 deletions build.sh
@@ -17,7 +17,7 @@
#

dir=`pwd`
#step1 清除生成的目录和文件
# step1 Clear generated directories and files
bazel clean
rm -rf curvefs_python/BUILD
rm -rf curvefs_python/tmplib/
@@ -29,16 +29,16 @@ then
exit
fi

#step2 获取tag版本和git提交版本信息
#获取tag版本
# step2 Obtain the tag version and git commit information
# Get the tag version
tag_version=`git status | grep -w "HEAD detached at" | awk '{print $NF}' | awk -F"v" '{print $2}'`
if [ -z ${tag_version} ]
then
echo "not found version info, set version to 9.9.9"
tag_version=9.9.9
fi

#获取git提交版本信息
# Obtain the git commit information
commit_id=`git show --abbrev-commit HEAD|head -n 1|awk '{print $2}'`
if [ "$1" = "debug" ]
then
@@ -50,7 +50,7 @@ fi
curve_version=${tag_version}+${commit_id}${debug}


#step3 执行编译
# step3 Run the build
# check bazel version, bazel version must be 4.2.2
bazel_version=`bazel version | grep "Build label" | awk '{print $3}'`
if [ -z ${bazel_version} ]
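The two awk pipelines in step2 can be replayed on canned input to see what they produce (a sketch; the sample strings are illustrative, the pipelines are the ones from build.sh):

    # First line of `git status` for a checkout of tag v1.2.3:
    status_line="HEAD detached at v1.2.3"
    tag_version=$(echo "${status_line}" | awk '{print $NF}' | awk -F"v" '{print $2}')
    echo "${tag_version}"                  # -> 1.2.3

    # First line of `git show --abbrev-commit HEAD`:
    show_line="commit 3f2a9c1"
    commit_id=$(echo "${show_line}" | awk '{print $2}')
    echo "${tag_version}+${commit_id}"     # -> 1.2.3+3f2a9c1, the curve_version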
144 changes: 72 additions & 72 deletions conf/chunkserver.conf
@@ -1,17 +1,17 @@
#
# Global settings
#
# log等级INFO=0/WARNING=1/ERROR=2/FATAL=3
# Log level INFO=0/WARNING=1/ERROR=2/FATAL=3
global.ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_addr} __CURVEADM_TEMPLATE__
global.port=8200 # __CURVEADM_TEMPLATE__ ${service_port} __CURVEADM_TEMPLATE__
global.subnet=127.0.0.0/24
global.enable_external_server=true
global.external_ip=127.0.0.1 # __CURVEADM_TEMPLATE__ ${service_external_addr} __CURVEADM_TEMPLATE__
global.external_subnet=127.0.0.0/24
# chunk大小,一般16MB
# Chunk size, usually 16MB
# it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true
global.chunk_size=16777216
# chunk 元数据页大小,一般4KB
# Chunk metadata page size, usually 4KB
# it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true
global.meta_page_size=4096
# chunk's block size, IO requests must align with it, supported value is |512| and |4096|
@@ -21,40 +21,40 @@ global.meta_page_size=4096
# it will be overwritten from chunkfilepool.meta if `chunkfilepool.enable_get_chunk_from_pool` is true
global.block_size=4096
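Since IO requests must align with global.block_size, a request can be checked with simple modular arithmetic (a sketch; the offset and length values are made up):

    block_size=4096
    offset=8192
    length=16384
    if [ $((offset % block_size)) -eq 0 ] && [ $((length % block_size)) -eq 0 ]; then
        echo "request is aligned"    # both 8192 and 16384 are multiples of 4096
    fi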

# clone chunk允许的最长location长度
# The maximum allowed location length for clone chunks
global.location_limit=3000

#
# MDS settings
#
#支持mds多地址,以逗号分隔 127.0.0.1:6666,127.0.0.1:7777
# Multiple mds addresses are supported, separated by commas, e.g. 127.0.0.1:6666,127.0.0.1:7777
mds.listen.addr=127.0.0.1:6666 # __CURVEADM_TEMPLATE__ ${cluster_mds_addr} __CURVEADM_TEMPLATE__
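The comma separated address list can be split with standard shell tools (a sketch; the endpoints are the example values above):

    mds_addrs="127.0.0.1:6666,127.0.0.1:7777"
    IFS=',' read -ra endpoints <<< "${mds_addrs}"
    for ep in "${endpoints[@]}"; do
        echo "mds endpoint: ${ep}"
    done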
# 向mds注册的最大重试次数
# Maximum number of retries when registering with mds
mds.register_retries=100
# 向mds注册的rpc超时时间,一般1000ms
# RPC timeout for registering with mds, typically 1000ms
mds.register_timeout=1000
# 向mds发送心跳的间隔,一般10s
# Interval for sending heartbeats to mds, usually 10s
mds.heartbeat_interval=10
# 向mds发送心跳的rpc超时间,一般1000ms
# RPC timeout for heartbeats sent to mds, usually 1000ms
mds.heartbeat_timeout=5000

#
# Chunkserver settings
#
# chunkserver主目录
# Chunkserver home directory
chunkserver.stor_uri=local://./0/ # __CURVEADM_TEMPLATE__ local://${prefix}/data __CURVEADM_TEMPLATE__
# chunkserver元数据文件
# Chunkserver metadata file
chunkserver.meta_uri=local://./0/chunkserver.dat # __CURVEADM_TEMPLATE__ local://${prefix}/data/chunkserver.dat __CURVEADM_TEMPLATE__
# disk类型
# Disk type
chunkserver.disk_type=nvme
# raft内部install snapshot带宽上限,一般20MB
# Raft internal install snapshot bandwidth limit, usually 20MB
chunkserver.snapshot_throttle_throughput_bytes=20971520
# check cycles是为了更精细的进行带宽控制,以snapshotThroughputBytes=100MB
# check cycles=10为例,它可以保证每1/10秒的带宽是10MB,且不累积,例如第1个
# 1/10秒的带宽是10MB,但是就过期了,在第2个1/10秒依然只能用10MB的带宽,而
# 不是20MB的带宽
# Check cycles allow finer-grained bandwidth control. Taking
# snapshotThroughputBytes=100MB with check cycles=10 as an example, each 1/10
# second gets a 10MB budget that does not accumulate: if the first 1/10
# second's 10MB expires unused, the second 1/10 second still has only 10MB of
# bandwidth available, not 20MB
chunkserver.snapshot_throttle_check_cycles=4
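With the values configured above, the per-cycle budget implied by the comment works out as follows (a sketch of the arithmetic only; the real throttling happens inside the chunkserver):

    throughput_bytes=20971520    # 20MB/s total install snapshot bandwidth
    check_cycles=4
    echo "$((throughput_bytes / check_cycles)) bytes per 1/${check_cycles} second"
    # -> 5242880 bytes (5MB) per 1/4 second window; unused budget expires with
    #    its window and never rolls over, so no window can burst above 5MB.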
# 限制inflight io数量,一般是5000
# Limit on the number of inflight IO requests, usually 5000
chunkserver.max_inflight_requests=5000

#
@@ -70,43 +70,43 @@ test.testcopyset_conf=127.0.0.1:8200:0,127.0.0.1:8201:0,127.0.0.1:8202:0
#
# lease read switch, default is true(open lease read)
# if false, all requests will propose to raft(log read)
# 启用lease read,一般开启,否则将退化为log read形式
# Enable lease read; usually enabled, otherwise reads degrade to log reads
copyset.enable_lease_read=true
# 是否检查任期,一般检查
# Whether to check the raft term; usually enabled
copyset.check_term=true
# 是否关闭raft配置变更的服务,一般不关闭
# Whether to disable the raft configuration change service; usually not disabled
copyset.disable_cli=false
copyset.log_applied_task=false
# raft选举超时时间,一般是5000ms
# Raft election timeout, usually 5000ms
copyset.election_timeout_ms=1000
# raft打快照间隔,一般是1800s,也就是30分钟
# Raft snapshot interval, usually 1800s, i.e. 30 minutes
copyset.snapshot_interval_s=1800
# add一个节点,add的节点首先以类似learner的角色拷贝数据
# 在跟leader差距catchup_margin个entry的时候,leader
# 会尝试将配置变更的entry进行提交(一般来说提交的entry肯定
# 会commit&apply,catchup_margin较小可以大概率确保learner
# 后续很快可以加入复制组
# When a node is added, it first copies data in a learner-like role. Once its
# gap from the leader is within catchup_margin entries, the leader attempts to
# submit the configuration change entry (generally a submitted entry will
# surely be committed & applied). A small catchup_margin makes it very likely
# that the learner can join the replication group soon afterwards.
copyset.catchup_margin=1000
# copyset chunk数据目录
# Copyset chunk data directory
copyset.chunk_data_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# raft wal log目录
# Raft wal log directory
copyset.raft_log_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# raft元数据目录
# Raft metadata directory
copyset.raft_meta_uri=local://./0/copysets # __CURVEADM_TEMPLATE__ local://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# raft snapshot目录
# Raft snapshot directory
copyset.raft_snapshot_uri=curve://./0/copysets # __CURVEADM_TEMPLATE__ curve://${prefix}/data/copysets __CURVEADM_TEMPLATE__
# copyset回收目录
# Copyset recycle directory
copyset.recycler_uri=local://./0/recycler # __CURVEADM_TEMPLATE__ local://${prefix}/data/recycler __CURVEADM_TEMPLATE__
# chunkserver启动时,copyset并发加载的阈值,为0则表示不做限制
# Threshold for concurrent copyset loading when the chunkserver starts; 0 means no limit
copyset.load_concurrency=10
# chunkserver use how many threads to use copyset complete sync.
copyset.sync_concurrency=20
# 检查copyset是否加载完成出现异常时的最大重试次数
# Maximum number of retries when an exception occurs while checking whether copysets have finished loading
copyset.check_retrytimes=3
# 当前peer的applied_index与leader上的committed_index差距小于该值
# 则判定copyset已经加载完成
# If the gap between the current peer's applied_index and the leader's
# committed_index is smaller than this value, the copyset is considered loaded
copyset.finishload_margin=2000
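The load-completion rule above reduces to one comparison (a sketch with hypothetical index values; the real check lives in the chunkserver, not in shell):

    applied_index=14100      # hypothetical value on this peer
    committed_index=15000    # hypothetical value on the leader
    finishload_margin=2000
    if [ $((committed_index - applied_index)) -lt ${finishload_margin} ]; then
        echo "copyset considered loaded"    # gap is 900 < 2000
    fi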
# 循环判定copyset是否加载完成的内部睡眠时间
# Sleep interval inside the loop that checks whether copysets have finished loading
copyset.check_loadmargin_interval_ms=1000
# scan copyset interval
copyset.scan_interval_sec=5
@@ -132,34 +132,34 @@ copyset.check_syncing_interval_ms=500
#
# Clone settings
#
# 禁止使用curveclient
# Disable curveclient
clone.disable_curve_client=false
# 禁止使用s3adapter
# Disable s3adapter
clone.disable_s3_adapter=false
# 克隆的分片大小,一般1MB
# Slice size for cloning, usually 1MB
clone.slice_size=1048576
# 读clone chunk时是否需要paste到本地
# 该配置对recover chunk请求类型无效
# Whether the data needs to be pasted to the local chunk when reading a clone chunk
# This configuration has no effect on recover chunk requests
clone.enable_paste=false
# 克隆的线程数量
# Number of clone threads
clone.thread_num=10
# 克隆的队列深度
# Queue depth for cloning
clone.queue_depth=6000
# curve用户名
# Curve username
curve.root_username=root
# curve密码
# Curve password
curve.root_password=root_password
# client配置文件
# Client configuration file
curve.config_path=conf/cs_client.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/cs_client.conf __CURVEADM_TEMPLATE__
# s3配置文件
# S3 configuration file
s3.config_path=conf/s3.conf # __CURVEADM_TEMPLATE__ ${prefix}/conf/s3.conf __CURVEADM_TEMPLATE__
# Curve File time to live
curve.curve_file_timeout_s=30

#
# Local FileSystem settings
#
# 是否开启使用renameat2,ext4内核3.15以后开始支持
# Whether to enable renameat2; ext4 supports it since kernel 3.15
fs.enable_renameat2=true

#
@@ -179,27 +179,27 @@ storeng.sync_write=false

#
# Concurrent apply module
# 并发模块写线程的并发度,一般是10
# Concurrency of the write threads in the concurrent apply module, usually 10
wconcurrentapply.size=10
# 并发模块写线程的队列深度
# Queue depth of the write threads in the concurrent apply module
wconcurrentapply.queuedepth=1
# 并发模块读线程的并发度,一般是5
# Concurrency of the read threads in the concurrent apply module, usually 5
rconcurrentapply.size=5
# 并发模块读线程的队列深度
# Queue depth of the read threads in the concurrent apply module
rconcurrentapply.queuedepth=1

#
# Chunkfile pool
#
# 是否开启从chunkfilepool获取chunk,一般是true
# Whether to enable obtaining chunks from chunkfilepool, usually true
chunkfilepool.enable_get_chunk_from_pool=true
# chunkfilepool目录
# chunkfilepool directory
chunkfilepool.chunk_file_pool_dir=./0/chunks # __CURVEADM_TEMPLATE__ ${prefix}/data __CURVEADM_TEMPLATE__
# chunkfilepool meta文件路径
# chunkfilepool meta file path
chunkfilepool.meta_path=./chunkfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/chunkfilepool.meta __CURVEADM_TEMPLATE__
# chunkfilepool meta文件大小
# chunkfilepool meta file size
chunkfilepool.cpmeta_file_size=4096
# chunkfilepool get chunk最大重试次数
# chunkfilepool get chunk maximum retry count
chunkfilepool.retry_times=5
# Enable clean chunk
chunkfilepool.clean.enable=true
@@ -219,23 +219,23 @@ chunkfilepool.thread_num=1
#
# WAL file pool
#
# walpool是否共用chunkfilepool,如果为true,从第三条开始配置无效
# Whether the walpool shares the chunkfilepool; if true, the configurations from the third entry onward have no effect
walfilepool.use_chunk_file_pool=true
# WALpool和ChunkFilePool共用时启用,在容量分配时会预留walpool的空间
# Takes effect when the WALpool and ChunkFilePool are shared; space for the WALpool is reserved during capacity allocation
walfilepool.use_chunk_file_pool_reserve=15
# 是否开启从walfilepool获取chunk,一般是true
# Whether to enable obtaining chunks from walfilepool, usually true
walfilepool.enable_get_segment_from_pool=true
# walpool目录
# Walpool directory
walfilepool.file_pool_dir=./0/ # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
# walpool meta文件路径
# Walpool meta file path
walfilepool.meta_path=./walfilepool.meta # __CURVEADM_TEMPLATE__ ${prefix}/data/walfilepool.meta __CURVEADM_TEMPLATE__
# walpool meta文件大小
# Walpool meta file size
walfilepool.segment_size=8388608
# WAL metapage大小
# WAL metapage size
walfilepool.metapage_size=4096
# WAL filepool 元数据文件大小
# WAL filepool metadata file size
walfilepool.meta_file_size=4096
# WAL filepool get chunk最大重试次数
# WAL filepool get chunk maximum retry count
walfilepool.retry_times=5
# Whether allocate filePool by percent of disk size.
walfilepool.allocated_by_percent=true
@@ -249,14 +249,14 @@ walfilepool.thread_num=1
#
# trash settings
#
# chunkserver回收数据彻底删除的过期时间
# Expiration time after which data recycled by the chunkserver is permanently deleted
trash.expire_afterSec=300
# chunkserver检查回收数据过期时间的周期
# Interval at which the chunkserver checks whether recycled data has expired
trash.scan_periodSec=120

# common option
#
# chunkserver 日志存放文件夹
# Chunkserver log storage folder
chunkserver.common.logDir=./ # __CURVEADM_TEMPLATE__ ${prefix}/logs __CURVEADM_TEMPLATE__
# 单元测试情况下
# In the case of unit testing
# chunkserver.common.logDir=./runlog/