- 积分
- 16840
在线时间 小时
最后登录1970-1-1
|

楼主 |
发表于 2023-1-9 17:03:34
|
显示全部楼层
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_WARN
            too many PGs per OSD (272 > max 250)

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
" O. z' ~6 i0 c6 _[root@ceph1 mnt]# ceph config set global mon_pg_per_osd 5009 K T$ Y) O! X% h/ l7 Y
Error EINVAL: unrecognized config option 'mon_pg_per_osd'
7 C! j- L$ e6 k2 A[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
|
|