Files
rook-ceph/stx-rook-ceph/stx-rook-ceph/helm-charts/rook-operator/templates/rook-config-override.yaml
Martin, Chen 5bbddf837a Introduce application to deploy rook ceph, support for Helm v3
Enable rook with the following procedure:
1. Deploy the system without adding a ceph storage backend
2. Update the OSD info with a helm override
    $ system helm-override-update rook-ceph-apps rook-ceph --values value.yaml
3. Apply the rook-ceph application
    $ system application-apply rook-ceph-storage

Sample value.yaml to provision device sdb on host 'controller-0':
cluster:
  storage:
    nodes:
    - devices:
      - config:
          journalSizeMB: 1024
          storeType: bluestore
        name: sdb
      name: controller-0

Story: 2005527
Task: 39214

Depends-On: https://review.opendev.org/#/c/713084/

Change-Id: Ie8f43082a5022b4e3507f0ac8fe0a2654e2a3302
Signed-off-by: Martin, Chen <haochuan.z.chen@intel.com>
2021-01-08 08:52:39 +08:00

33 lines
902 B
YAML

# ConfigMap holding a ceph.conf override consumed by the rook-ceph operator.
# The `config` key is mounted/merged into the cluster's ceph configuration.
# NOTE: this is a Helm template — the namespace is resolved at render time.
apiVersion: v1
kind: ConfigMap
metadata:
  name: rook-config-override
  namespace: {{ .Release.Namespace }}
data:
  config: |
    [global]
    osd_journal_size = 1024
    # Replication of 1 with min_size 1: suitable for a single-OSD
    # (e.g. AIO controller) deployment, NOT for production multi-node use.
    osd_pool_default_size = 1
    osd_pool_default_min_size = 1
    osd_pool_default_pg_num = 64
    osd_pool_default_pgp_num = 64
    # chooseleaf_type 1 = "host" failure domain for CRUSH placement.
    osd_crush_chooseleaf_type = 1
    setuser match path = /var/lib/ceph/$type/$cluster-$id
    # Allow a single reporter to mark an OSD down (small clusters).
    mon_osd_min_down_reporters = 1
    osd_mon_report_interval_max = 120
    # Raised PG-per-OSD ceilings to avoid warnings on dense small clusters.
    mon_max_pg_per_osd = 2048
    osd_max_pg_per_osd_hard_ratio = 1.2
    ms_bind_ipv6 = false
    [osd]
    osd_mkfs_type = xfs
    osd_mkfs_options_xfs = "-f"
    osd_mount_options_xfs = "rw,noatime,inode64,logbufs=8,logbsize=256k"
    [mon]
    mon warn on legacy crush tunables = false
    mon pg warn max per osd = 2048
    # Skew warning disabled (0) — object counts are expected to be uneven here.
    mon pg warn max object skew = 0
    mon clock drift allowed = .1
    # Suppress the "pool has no redundancy" warning caused by size=1 pools.
    mon warn on pool no redundancy = false