parameter_defaults:
  CephPoolDefaultSize: 3
  CephPoolDefaultPgNum: 64
  CephPools:
    - {"name": backups, "pg_num": 128, "pgp_num": 128, "application": "rbd"}
    - {"name": volumes, "pg_num": 256, "pgp_num": 256, "application": "rbd"}
    - {"name": vms, "pg_num": 64, "pgp_num": 64, "application": "rbd"}
    - {"name": images, "pg_num": 32, "pgp_num": 32, "application": "rbd"}
  CephConfigOverrides:
    osd_recovery_op_priority: 3
    osd_recovery_max_active: 3
    osd_max_backfills: 1
  CephAnsibleExtraConfig:
    nb_retry_wait_osd_up: 60
    delay_wait_osd_up: 20
    is_hci: true
    # 3 OSDs * 4 vCPUs per SSD = 12 vCPUs (these cores are not used for VNFs);
    # see the sanity check after this listing
    ceph_osd_docker_cpuset_cpus: "32,34,36,38,40,42,76,78,80,82,84,86"
    # cpu_limit 0 means no limit, because CPUs are already restricted by the cpuset above
    ceph_osd_docker_cpu_limit: 0
    # numactl --preferred allows allocations to cross the NUMA boundary if necessary,
    # while still preferring memory from NUMA node 0; --cpuset-mems would forbid
    # crossing the boundary. With plenty of memory, boundary crossing is unlikely anyway.
    ceph_osd_numactl_opts: "-N 0 --preferred=0"
  CephAnsibleDisksConfig:
    osds_per_device: 1
    osd_scenario: lvm
    osd_objectstore: bluestore
    devices:
      - /dev/sda
      - /dev/sdb
      - /dev/sdc
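Per-pool pg_num values such as those in the CephPools list above are usually derived from the standard Ceph sizing rule of thumb: roughly (target PGs per OSD * OSD count * expected share of data) / replica size, rounded up to a power of two. The Python sketch below only illustrates that arithmetic; the OSD count and data-distribution percentages are hypothetical placeholders, not values taken from this deployment, and real pools should be sized with the Ceph PG calculator.

    # Illustrative sketch of the common Ceph PG sizing rule of thumb.
    # The OSD count and data percentages are hypothetical placeholders,
    # not values taken from this environment file.

    def next_power_of_two(n: float) -> int:
        """Round n up to the nearest power of two (simplified pgcalc rounding)."""
        p = 1
        while p < n:
            p *= 2
        return p

    def pg_num(osd_count: int, data_share: float, size: int = 3,
               target_pgs_per_osd: int = 100) -> int:
        """pg_num ~= (target PGs per OSD * OSDs * expected data share) / replica size."""
        return next_power_of_two(target_pgs_per_osd * osd_count * data_share / size)

    # Hypothetical 9-OSD cluster (e.g. 3 HCI nodes x 3 OSDs) with a guessed data split.
    for pool, share in {"volumes": 0.50, "backups": 0.25, "vms": 0.15, "images": 0.10}.items():
        print(f"{pool}: pg_num {pg_num(osd_count=9, data_share=share)}")

With these placeholder inputs the sketch happens to reproduce the values used above (256, 128, 64, 32); for an actual deployment, feed the real OSD count and expected data distribution into the Ceph PG calculator instead.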
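The ceph_osd_docker_cpuset_cpus value follows the budget noted in the comment above: 3 OSDs * 4 vCPUs = 12 vCPUs. The short Python check below only verifies that arithmetic against the cpuset string; the variable names are illustrative and it is not part of the deployment templates.

    # Optional sanity check: the number of CPUs pinned for the OSD containers
    # should equal 4 vCPUs per OSD for the 3 OSD devices listed above.
    # Variable names are illustrative, not TripleO/ceph-ansible parameters.

    OSD_DEVICES = ["/dev/sda", "/dev/sdb", "/dev/sdc"]  # from CephAnsibleDisksConfig
    VCPUS_PER_OSD = 4                                   # sizing assumption from the comment

    CPUSET = "32,34,36,38,40,42,76,78,80,82,84,86"      # ceph_osd_docker_cpuset_cpus
    cpu_ids = [int(c) for c in CPUSET.split(",")]

    expected = len(OSD_DEVICES) * VCPUS_PER_OSD
    assert len(cpu_ids) == expected, f"expected {expected} pinned vCPUs, got {len(cpu_ids)}"
    assert len(set(cpu_ids)) == len(cpu_ids), "duplicate CPU IDs in cpuset"
    print(f"{len(cpu_ids)} vCPUs pinned for {len(OSD_DEVICES)} OSDs ({VCPUS_PER_OSD} each)")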