vagrant@ceph-admin:~/test-cluster$ ceph osd tree
# id	weight	type name	up/down	reweight
-1	0.07999	root default
-2	0.03999		host ceph-server-2
0	0.03999			osd.0	up	1
-3	0.03999		host ceph-server-3
1	0.03999			osd.1	up	1

vagrant@ceph-admin:~/test-cluster$ ceph osd dump | grep size
pool 0 'data' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 1 flags hashpspool crash_replay_interval 45 stripe_width 0
pool 1 'metadata' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 1 flags hashpspool stripe_width 0
pool 2 'rbd' replicated size 3 min_size 2 crush_ruleset 0 object_hash rjenkins pg_num 64 pgp_num 64 last_change 1 flags hashpspool stripe_width 0
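
With only two OSDs in the tree and every pool at replicated size 3, placement groups cannot place all three copies, so the cluster will normally sit in a degraded/undersized state. A common adjustment for a two-OSD test cluster (a sketch of the usual fix, not part of the captured session above) is to lower each pool's replication size to 2:

ceph osd pool set data size 2
ceph osd pool set metadata size 2
ceph osd pool set rbd size 2
ceph osd pool set data min_size 1    # optional: allow I/O when only one copy is available

After the change, rerunning ceph osd dump | grep size should show size 2 for each pool.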