- 积分
- 16840
在线时间 小时
最后登录1970-1-1
|

楼主 |
发表于 2023-1-9 17:03:34
|
显示全部楼层
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_WARN
            too many PGs per OSD (272 > max 250)

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 4m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_pg_per_osd 500
Error EINVAL: unrecognized config option 'mon_pg_per_osd'
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph config get mon
WHO     MASK  LEVEL     OPTION                                 VALUE                  RO
mon           advanced  auth_allow_insecure_global_id_reclaim  false
mon           advanced  cluster_network                        162.96.90.0/24         *
global        basic     container_image                        quay.io/ceph/ceph:v15  *
mon           advanced  mon_allow_pool_delete                  true
mon           advanced  mon_max_pg_per_osd                     500
global        advanced  mon_target_pg_per_osd                  500
global        advanced  mon_warn_on_pool_no_redundancy         false
mon           advanced  osd_max_pg_per_osd_hard_ratio          10.000000
mon           advanced  public_network                         192.168.13.0/24        *
[root@ceph1 mnt]# ceph config set global mon_max_pg_per_osd 500
[root@ceph1 mnt]# ceph -s
  cluster:
    id:     433d3d2a-8e4a-11ed-b84b-000c29377297
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum ceph1,ceph2,ceph3 (age 5m)
    mgr: ceph2.twhubv(active, since 6h), standbys: ceph3.dkkmft, ceph1.zmducz
    mds: cephfs:1 {0=cephfs.ceph3.idylsu=up:active} 1 up:standby
    osd: 3 osds: 3 up (since 7h), 3 in (since 19h)
    rgw: 3 daemons active (ceph1, ceph2, ceph3)

  task status:

  data:
    pools:   8 pools, 273 pgs
    objects: 240 objects, 162 KiB
    usage:   3.0 GiB used, 57 GiB / 60 GiB avail
    pgs:     273 active+clean
|
|