Common Commands

Ceph Management

# Check Ceph status
[root@rook-ceph-tools-69f7b87b57-vwl67 /]# ceph -s
  cluster:
    id:     e3f8d4d7-6888-4f67-b740-5a80da34b3dc
    health: HEALTH_WARN
            Degraded data redundancy: 988/11616 objects degraded (8.506%), 142 pgs degraded, 192 pgs undersized
            application not enabled on 2 pool(s)

  services:
    mon: 1 daemons, quorum a (age 7d)
    mgr: a(active, since 20h)
    mds: myfs:1 {0=myfs-b=up:active} 1 up:standby-replay
    osd: 2 osds: 2 up (since 25h), 2 in (since 7d); 1 remapped pgs
    rgw: 1 daemon active (ceph-rgw-7f89b49bb8-ksdhr)

  task status:
    scrub status:
        mds.myfs-a: idle
        mds.myfs-b: idle

  data:
    pools:   11 pools, 480 pgs
    objects: 6.14k objects, 35 GiB
    usage:   70 GiB used, 30 GiB / 100 GiB avail
    pgs:     988/11616 objects degraded (8.506%)
             16/11616 objects misplaced (0.138%)
             288 active+clean
             142 active+undersized+degraded
             49  active+undersized
             1   active+undersized+remapped

  io:
    client: 1.2 KiB/s rd, 171 KiB/s wr, 2 op/s rd, 17 op/s wr
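
The HEALTH_WARN above includes "application not enabled on 2 pool(s)". A sketch of the usual fix: `ceph health detail` names the affected pools, and the pool/application pair below is a placeholder, not taken from this transcript:

# Identify the pools behind the warning, then tag each one with its application (rbd, cephfs, or rgw)
ceph health detail
ceph osd pool application enable <pool-name> rbd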

# Check Ceph capacity usage
[root@rook-ceph-tools-69f7b87b57-vwl67 /]# ceph df
RAW STORAGE:
    CLASS     SIZE        AVAIL      USED       RAW USED     %RAW USED
    hdd       100 GiB     30 GiB     68 GiB     70 GiB           69.83
    TOTAL     100 GiB     30 GiB     68 GiB     70 GiB           69.83

POOLS:
    POOL                   ID     STORED      OBJECTS     USED        %USED     MAX AVAIL
    myfs-metadata           1     2.5 MiB          25     3 MiB        0.01        25 GiB
    kube                    2     852 MiB         297     923 MiB      3.47        24 GiB
    myfs-data0              3     3.9 MiB         180     13 MiB       0.05        25 GiB
    cinder-volumes          4     70 MiB          151     146 MiB      0.56        13 GiB
    cinder.backups          5     0 B               0     0 B             0        13 GiB
    cinder.volumes          6     0 B               0     0 B             0        25 GiB
    .rgw.root               7     1.2 KiB           4     256 KiB         0        25 GiB
    default.rgw.control     8     0 B               8     0 B             0        25 GiB
    default.rgw.meta        9     0 B               0     0 B             0        25 GiB
    default.rgw.log        10     0 B           1.16k     0 B             0        25 GiB
    images                 11     33 GiB        4.31k     67 GiB      72.66        13 GiB
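
For a finer-grained capacity view than `ceph df`, two standard companion commands (not part of the original transcript):

# Per-OSD utilization, weight, and PG count
ceph osd df
# Per-pool object and space statistics at the RADOS layer
rados df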

# List detailed pool settings
[root@rook-ceph-tools-c9479bfb9-lljmc /]# ceph osd pool ls detail
pool 1 'myfs-metadata' replicated size 3 min_size 1 crush_rule 1 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 148 flags hashpspool stripe_width 0 pg_autoscale_bias 4 pg_num_min 16 recovery_priority 5 application cephfs
pool 2 'kube' replicated size 3 min_size 1 crush_rule 2 object_hash rjenkins pg_num 8 pgp_num 8 autoscale_mode warn last_change 294 lfor 0/294/292 flags hashpspool stripe_width 0 application rbd
pool 3 'myfs-data0' replicated size 3 min_size 1 crush_rule 3 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 149 flags hashpspool stripe_width 0 application cephfs
pool 4 'cinder-volumes' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 128 pgp_num 128 autoscale_mode warn last_change 352 lfor 335/335/346 flags hashpspool,selfmanaged_snaps tiers 12 read_tier 12 write_tier 12 stripe_width 0 application rbd
        removed_snaps [1~3]
pool 5 'cinder.backups' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 352 lfor 0/0/348 flags hashpspool stripe_width 0 application rbd
pool 6 '.rgw.root' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 306 flags hashpspool stripe_width 0 application rgw
pool 7 'default.rgw.control' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 308 flags hashpspool stripe_width 0 application rgw
pool 8 'default.rgw.meta' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 310 flags hashpspool stripe_width 0 application rgw
pool 9 'default.rgw.log' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 312 flags hashpspool stripe_width 0 application rgw
pool 10 'images' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 32 pgp_num 32 autoscale_mode warn last_change 352 lfor 0/0/350 flags hashpspool,selfmanaged_snaps stripe_width 0 application rbd
        removed_snaps [1~3]
pool 11 'default.rgw.buckets.index' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 319 flags hashpspool stripe_width 0 application rgw
pool 12 'ssd_cache' replicated size 1 min_size 1 crush_rule 4 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 352 lfor 335/335/335 flags hashpspool,incomplete_clones,selfmanaged_snaps tier_of 4 cache_mode writeback target_bytes 14173392076 target_objects 1000000 hit_set bloom{false_positive_probability: 0.05, target_size: 0, seed: 0} 0s x0 decay_rate 0 search_last_n 0 stripe_width 0 application rbd
        removed_snaps [1~3]
pool 13 'default.rgw.buckets.data' replicated size 3 min_size 1 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode warn last_change 413 flags hashpspool stripe_width 0 application rgw
pool 14 'ecpool' erasure size 3 min_size 2 crush_rule 5 object_hash rjenkins pg_num 50 pgp_num 50 autoscale_mode warn last_change 421 flags hashpspool,ec_overwrites,selfmanaged_snaps stripe_width 8192
        removed_snaps [1~3]
        removed_snaps_queue [2~1]
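
To read or change an individual attribute from the listing above, the standard pool get/set commands apply; `kube` is just an example pool name from this cluster:

# Read one attribute, or dump them all
ceph osd pool get kube size
ceph osd pool get kube all
# Change the replica count; keep min_size below size
ceph osd pool set kube size 3
ceph osd pool set kube min_size 2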

RBD Management

Basics

# List RBD images in the specified pool
[root@rook-ceph-tools-69f7b87b57-vwl67 /]# rbd ls images
00cbc7af-c8ab-45db-9b81-8674b536b695
15bb8b35-dedd-4ba6-88e1-4d5d6dc8a65b
18f09bec-b9f9-4a63-a2b4-5ab3ceed19f7
32581d85-140a-432f-b555-0c23ca00779c
3e840866-668e-4527-9916-303b66e7f82b

# Show details of a specific RBD image
[root@rook-ceph-tools-69f7b87b57-vwl67 /]# rbd info images/32581d85-140a-432f-b555-0c23ca00779c
rbd image '32581d85-140a-432f-b555-0c23ca00779c':
        size 819 MiB in 103 objects
        order 23 (8 MiB objects)
        snapshot_count: 1
        id: 11470c8980729e
        block_name_prefix: rbd_data.11470c8980729e
        format: 2
        features: layering
        op_features:
        flags:
        create_timestamp: Thu Jul 15 01:08:31 2021
        access_timestamp: Thu Jul 15 01:09:34 2021
        modify_timestamp: Thu Jul 15 01:08:31 2021
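
Alongside `ls` and `info`, the usual image lifecycle commands; the image name `test` here is a made-up example:

# Create a 1 GiB image in the images pool
rbd create images/test --size 1G
# Grow it to 2 GiB (shrinking additionally requires --allow-shrink)
rbd resize images/test --size 2G
# Delete the image
rbd rm images/test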

Snapshots

# List snapshots of a specific RBD image
[root@rook-ceph-tools-69f7b87b57-vwl67 /]# rbd snap ls images/32581d85-140a-432f-b555-0c23ca00779c
SNAPID  NAME      SIZE     PROTECTED  TIMESTAMP
    54  snap      819 MiB  yes        Thu Jul 15 02:17:57 2021
    55  testsnap  819 MiB             Thu Jul 15 02:38:14 2021

# Create a snapshot ('rbd snap add' is a deprecated alias for 'rbd snap create')
rbd snap create <pool-name>/<rbd-name>@<snap-name>
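
The `snap` row above is marked PROTECTED, which is the prerequisite for cloning. A sketch of the remaining standard snapshot operations; everything in angle brackets is a placeholder:

# Protect a snapshot, then clone it into a new child image
rbd snap protect <pool-name>/<rbd-name>@<snap-name>
rbd clone <pool-name>/<rbd-name>@<snap-name> <pool-name>/<child-name>

# Roll the image back to a snapshot (the image should not be in use)
rbd snap rollback <pool-name>/<rbd-name>@<snap-name>

# Remove a snapshot; protected snapshots must be unprotected first
rbd snap unprotect <pool-name>/<rbd-name>@<snap-name>
rbd snap rm <pool-name>/<rbd-name>@<snap-name>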