怎么使用crushtool
更新:HHH   时间:2023-1-7


今天就跟大家聊聊有关怎么使用crushtool,可能很多人都不太了解,为了让大家更加了解,小编给大家总结了以下内容,希望大家根据这篇文章可以有所收获。

#使用crushtool 创建一个命名为crushmap128的crushmap(二进制,未解码),包含128个osd设备,每个host包含8个osd,每个机架(rack)包含4个host,所有的rack都在一个root default下

crushtool --outfn crushmap128 --build --num_osds 128 host straw 8 rack straw 4 default straw 0

或者使用straw2算法

crushtool --outfn crushmap128 --build --num_osds 128 host straw2 8 rack straw2 4 default straw2 0

解码

crushtool -d crushmap128 -o map128.txt

显示 ceph osd tree

crushtool -i  crushmap128 --tree
[root@ceph01 test]# crushtool -i  crushmap128 --tree
ID      WEIGHT  TYPE NAME
-21     128.00000       default default
-17     32.00000                rack rack0
-1      8.00000                 host host0
0       1.00000                         osd.0
1       1.00000                         osd.1
2       1.00000                         osd.2
3       1.00000                         osd.3
4       1.00000                         osd.4
5       1.00000                         osd.5
6       1.00000                         osd.6
7       1.00000                         osd.7
-2      8.00000                 host host1
8       1.00000                         osd.8
9       1.00000                         osd.9
10      1.00000                         osd.10
11      1.00000                         osd.11
12      1.00000                         osd.12
13      1.00000                         osd.13
14      1.00000                         osd.14
15      1.00000                         osd.15
-3      8.00000                 host host2
16      1.00000                         osd.16
17      1.00000                         osd.17
18      1.00000                         osd.18
19      1.00000                         osd.19
20      1.00000                         osd.20
21      1.00000                         osd.21
22      1.00000                         osd.22
23      1.00000                         osd.23
-4      8.00000                 host host3
24      1.00000                         osd.24
25      1.00000                         osd.25
26      1.00000                         osd.26
27      1.00000                         osd.27
28      1.00000                         osd.28
29      1.00000                         osd.29
30      1.00000                         osd.30
31      1.00000                         osd.31
-18     32.00000                rack rack1
-5      8.00000                 host host4
32      1.00000                         osd.32
33      1.00000                         osd.33
34      1.00000                         osd.34
35      1.00000                         osd.35
36      1.00000                         osd.36
37      1.00000                         osd.37
38      1.00000                         osd.38
39      1.00000                         osd.39
-6      8.00000                 host host5
40      1.00000                         osd.40
41      1.00000                         osd.41
42      1.00000                         osd.42
43      1.00000                         osd.43
44      1.00000                         osd.44
45      1.00000                         osd.45
46      1.00000                         osd.46
47      1.00000                         osd.47
-7      8.00000                 host host6
48      1.00000                         osd.48
49      1.00000                         osd.49
50      1.00000                         osd.50
51      1.00000                         osd.51
52      1.00000                         osd.52
53      1.00000                         osd.53
54      1.00000                         osd.54
55      1.00000                         osd.55
-8      8.00000                 host host7
56      1.00000                         osd.56
57      1.00000                         osd.57
58      1.00000                         osd.58
59      1.00000                         osd.59
60      1.00000                         osd.60
61      1.00000                         osd.61
62      1.00000                         osd.62
63      1.00000                         osd.63
-19     32.00000                rack rack2
-9      8.00000                 host host8
64      1.00000                         osd.64
65      1.00000                         osd.65
66      1.00000                         osd.66
67      1.00000                         osd.67
68      1.00000                         osd.68
69      1.00000                         osd.69
70      1.00000                         osd.70
71      1.00000                         osd.71
-10     8.00000                 host host9
72      1.00000                         osd.72
73      1.00000                         osd.73
74      1.00000                         osd.74
75      1.00000                         osd.75
76      1.00000                         osd.76
77      1.00000                         osd.77
78      1.00000                         osd.78
79      1.00000                         osd.79
-11     8.00000                 host host10
80      1.00000                         osd.80
81      1.00000                         osd.81
82      1.00000                         osd.82
83      1.00000                         osd.83
84      1.00000                         osd.84
85      1.00000                         osd.85
86      1.00000                         osd.86
87      1.00000                         osd.87
-12     8.00000                 host host11
88      1.00000                         osd.88
89      1.00000                         osd.89
90      1.00000                         osd.90
91      1.00000                         osd.91
92      1.00000                         osd.92
93      1.00000                         osd.93
94      1.00000                         osd.94
95      1.00000                         osd.95
-20     32.00000                rack rack3
-13     8.00000                 host host12
96      1.00000                         osd.96
97      1.00000                         osd.97
98      1.00000                         osd.98
99      1.00000                         osd.99
100     1.00000                         osd.100
101     1.00000                         osd.101
102     1.00000                         osd.102
103     1.00000                         osd.103
-14     8.00000                 host host13
104     1.00000                         osd.104
105     1.00000                         osd.105
106     1.00000                         osd.106
107     1.00000                         osd.107
108     1.00000                         osd.108
109     1.00000                         osd.109
110     1.00000                         osd.110
111     1.00000                         osd.111
-15     8.00000                 host host14
112     1.00000                         osd.112
113     1.00000                         osd.113
114     1.00000                         osd.114
115     1.00000                         osd.115
116     1.00000                         osd.116
117     1.00000                         osd.117
118     1.00000                         osd.118
119     1.00000                         osd.119
-16     8.00000                 host host15
120     1.00000                         osd.120
121     1.00000                         osd.121
122     1.00000                         osd.122
123     1.00000                         osd.123
124     1.00000                         osd.124
125     1.00000                         osd.125
126     1.00000                         osd.126
127     1.00000                         osd.127

编辑rule规则

vim map128.txt  #修改rule部分

###3副本都在一个rack且在同一个host内

rule replicated_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step choose firstn 1 type host
        step chooseleaf firstn 3 type osd
        step emit
}

测试结果(测试规则0 ,总共1..5个对象, 3副本) 
rule 0 (replicated_ruleset), x = 1..5, numrep = 3..3
CRUSH rule 0 x 1 [80,84,87]        对象1的3个副本在 osd.80,osd.84,osd.87
CRUSH rule 0 x 2 [63,58,61]        对象2的3个副本在 osd.63,osd.58,osd.61
CRUSH rule 0 x 3 [121,127,124]
CRUSH rule 0 x 4 [67,71,65]
CRUSH rule 0 x 5 [45,47,46]

###3副本都在一个rack里,可能在同一个host内

rule replicated_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step choose firstn 1 type rack
        step chooseleaf firstn 3 type osd
        step emit
}

测试结果
rule 0 (replicated_ruleset), x = 1..5, numrep = 3..3
CRUSH rule 0 x 1 [80,84,67]
CRUSH rule 0 x 2 [63,50,48]
CRUSH rule 0 x 3 [121,127,111]
CRUSH rule 0 x 4 [67,86,79]
CRUSH rule 0 x 5 [45,38,46]

###3副本都在一个rack里,不在同一个host内

rule replicated_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step choose firstn 1 type rack
        step chooseleaf firstn 3 type host
        step emit
}

测试结果
rule 0 (replicated_ruleset), x = 1..5, numrep = 3..3
CRUSH rule 0 x 1 [80,70,79]
CRUSH rule 0 x 2 [63,48,42]
CRUSH rule 0 x 3 [121,109,113]
CRUSH rule 0 x 4 [67,82,76]
CRUSH rule 0 x 5 [45,36,57]

###3副本在3个rack里

rule replicated_ruleset {
        ruleset 0
        type replicated
        min_size 1
        max_size 10
        step take default
        step choose firstn 3 type rack
        step chooseleaf firstn 1 type host
        step emit
}


测试结果
rule 0 (replicated_ruleset), x = 1..5, numrep = 3..3
CRUSH rule 0 x 1 [80,115,43]
CRUSH rule 0 x 2 [63,7,126]
CRUSH rule 0 x 3 [121,30,73]
CRUSH rule 0 x 4 [67,8,61]
CRUSH rule 0 x 5 [45,79,28]

编码

crushtool  -c map128.txt  -o maptmp.bin

测试

                                         显示统计信息       测试rule0   上传对象最小1个最多5个  3副本     显示映射关系     显示ceph osd tree
crushtool -i maptmp.bin  --test --show-statistics --rule 0 --min-x 1 --max-x 5 --num-rep 3  --show-mappings --tree
#添加机架
ceph osd crush add-bucket rack01 rack
ceph osd crush add-bucket rack02 rack
ceph osd crush add-bucket rack03 rack

#移动主机到机架
ceph osd crush move ceph23 rack=rack01
ceph osd crush move ceph24 rack=rack02
ceph osd crush move ceph25 rack=rack03

#移动机架到default root
ceph osd crush move rack01 root=default
ceph osd crush move rack02 root=default
ceph osd crush move rack03 root=default

得到新的crushmap


#移动好后,将测试过的rule规则写入新的crushmap并导入到集群中。注意:测试用crushmap中的osd编号不一定和实际集群中的编号相同,因此只需要通过测试确认rule规则正确即可

测试完成后导入集群

导出crushmap
ceph osd getcrushmap -o ma-crush-map
解码crushmap
crushtool -d ma-crush-map -o ma-crush-map.txt
vim ma-crush-map.txt  #修改rule部分为上面测试过的rule

编译crushmap
crushtool -c ma-crush-map.txt -o ma-nouvelle-crush-map
导入crushmap
ceph osd setcrushmap -i ma-nouvelle-crush-map

看完上述内容,你们对怎么使用crushtool有进一步的了解吗?如果还想了解更多知识或者相关内容,请关注天达云行业资讯频道,感谢大家的支持。

返回云计算教程...