app-rook-ceph/stx-rook-ceph-helm/stx-rook-ceph-helm/fluxcd-manifests/rook-ceph/rook-ceph-static-overrides.yaml
Commit 05c35a2ecd by Gustavo Ornaghi Antunes: Fix image format for rook-ceph app
This change ensures that the images used by the rook-ceph app follow the
format expected by kube_app by adding the 'registry.local' prefix.

This prevents the environment from always pulling images from the public
registry.

Test Plan:
 - PASS: Build rook-ceph app
 - PASS: Apply rook-ceph app with the changes
 - PASS: Update from an old rook-ceph app version to a new one
 - PASS: Check that the images were downloaded with the registry.local prefix
 - PASS: Check that all rook-ceph pods are using the images with the
         registry.local prefix

Closes-Bug: 2122134

Change-Id: I71dbc164c5091ea5e2a05ff7d4c486022f1e23ec
Signed-off-by: Gustavo Ornaghi Antunes <gustavo.ornaghiantunes@windriver.com>
2025-09-05 14:45:51 -03:00
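As an illustration of the image format the commit message describes, a minimal sketch using the csi.cephcsi.image value from the overrides below. The prefixed form shown here is an assumption for illustration only (the exact prefix, including any local registry port, may differ); the overrides below still list the upstream reference:

    csi:
      cephcsi:
        # Illustrative sketch only: the upstream image rewritten so it is
        # pulled through the local registry instead of the public registry.
        image: registry.local/quay.io/cephcsi/cephcsi:v3.13.1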


#
# Copyright (c) 2024-2025 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
admissionController:
  # Set tolerations and nodeAffinity for admission controller pod.
  # The admission controller would be best to start on the same nodes as other ceph daemons.
  tolerations:
    - effect: NoSchedule
      key: node-role.kubernetes.io/control-plane
      operator: Exists
    - effect: NoSchedule
      key: node-role.kubernetes.io/master
      operator: Exists
allowLoopDevices: false
app.starlingx.io/component: platform
crds:
  # Whether the helm chart should create and update the CRDs. If false, the CRDs must be
  # managed independently with cluster/examples/kubernetes/ceph/crds.yaml.
  # **WARNING** Only set during first deployment. If later disabled the cluster may be DESTROYED.
  # If the CRDs are deleted in this case, see the disaster recovery guide to restore them.
  # https://rook.github.io/docs/rook/master/ceph-disaster-recovery.html#restoring-crds-after-deletion
  enabled: true
csi:
  # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
  # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
  # If cephFSAttachRequired is set to false it skips the volume attachments and makes the creation
  # of pods using the CephFS PVC fast. **WARNING** It's highly discouraged to use this for
  # CephFS RWO volumes. Refer to this [issue](https://github.com/kubernetes/kubernetes/issues/103305) for more details.
  cephFSAttachRequired: true
  # -- Policy for modifying a volume's ownership or permissions when the CephFS PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  cephFSFSGroupPolicy: File
  cephcsi:
    # -- Ceph CSI image
    image: quay.io/cephcsi/cephcsi:v3.13.1
  registrar:
    # -- Kubernetes CSI registrar image
    image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.13.0
  provisioner:
    # -- Kubernetes CSI provisioner image
    image: registry.k8s.io/sig-storage/csi-provisioner:v5.1.0
  snapshotter:
    # -- Kubernetes CSI snapshotter image
    image: registry.k8s.io/sig-storage/csi-snapshotter:v8.2.0
  attacher:
    # -- Kubernetes CSI Attacher image
    image: registry.k8s.io/sig-storage/csi-attacher:v4.8.0
  resizer:
    # -- Kubernetes CSI resizer image
    image: registry.k8s.io/sig-storage/csi-resizer:v1.13.1
  # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
  cephfsPodLabels: app.starlingx.io/component=platform
  # -- CEPH CSI CephFS plugin resource requirement list
  # @default -- see values.yaml
  csiCephFSPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 1Gi
  # -- CEPH CSI RBD provisioner resource requirement list
  # csi-omap-generator resources will be applied only if `enableOMAPGenerator` is set to `true`
  # @default -- see values.yaml
  csiRBDProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-resizer
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-attacher
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-snapshotter
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 0
        limits:
          memory: 1Gi
  # -- CEPH CSI RBD plugin resource requirement list
  # @default -- see values.yaml
  csiRBDPluginResource: |
    - name : driver-registrar
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-rbdplugin
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 1Gi
    - name : liveness-prometheus
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
  # -- CEPH CSI CephFS provisioner resource requirement list
  # @default -- see values.yaml
  csiCephFSProvisionerResource: |
    - name : csi-provisioner
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-resizer
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-attacher
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-snapshotter
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 256Mi
    - name : csi-cephfsplugin
      resource:
        requests:
          memory: 0
          cpu: 0
        limits:
          memory: 1Gi
  # -- Enable Ceph CSI PVC encryption support
  enableCSIEncryption: false
  # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
  # in some network configurations where the SDN does not provide access to an external cluster or
  # there is significant drop in read/write performance
  enableCSIHostNetwork: true
  # -- Enable Ceph CSI CephFS driver
  enableCephfsDriver: true
  # -- Enable Snapshotter in CephFS provisioner pod
  enableCephfsSnapshotter: true
  # -- Enable adding volume metadata on the CephFS subvolumes and RBD images.
  # Not all users might be interested in getting volume/snapshot details as metadata on CephFS subvolume and RBD images.
  # Hence enable metadata is false by default
  enableMetadata: false
  # -- Enable Snapshotter in NFS provisioner pod
  enableNFSSnapshotter: false
  # -- Enable Host mount for `/etc/selinux` directory for Ceph CSI nodeplugins
  enablePluginSelinuxHostMount: false
  # -- Enable Snapshotter in RBD provisioner pod
  enableRBDSnapshotter: true
  # -- Enable Ceph CSI RBD driver
  enableRbdDriver: true
  # -- Enable Ceph Kernel clients on kernel < 4.17. If your kernel does not support quotas for CephFS
  # you may want to disable this setting. However, this will cause an issue during upgrades
  # with the FUSE client. See the [upgrade guide](https://rook.io/docs/rook/v1.2/ceph-upgrade.html)
  forceCephFSKernelClient: true
  kubeletDirPath: /var/lib/kubelet
  # -- PriorityClassName to be set on csi driver plugin pods
  pluginPriorityClassName: system-node-critical
  pluginTolerations:
    - operator: "Exists"
  # -- PriorityClassName to be set on csi driver provisioner pods
  provisionerPriorityClassName: system-cluster-critical
  provisionerReplicas: 1
  provisionerTolerations:
    - key: node-role.kubernetes.io/control-plane
      operator: Exists
      effect: NoSchedule
    - key: node-role.kubernetes.io/master
      operator: Exists
      effect: NoSchedule
  # -- Policy for modifying a volume's ownership or permissions when the RBD PVC is being mounted.
  # supported values are documented at https://kubernetes-csi.github.io/docs/support-fsgroup.html
  rbdFSGroupPolicy: "File"
  # -- Labels to add to the CSI RBD Deployments and DaemonSets Pods
  rbdPodLabels: "app.starlingx.io/component=platform"
currentNamespaceOnly: false
# Disable automatic orchestration when new devices are discovered.
disableDeviceHotplug: false
# Blacklist certain disks according to the regex provided.
discoverDaemonUdev:
# In some situations SELinux relabelling breaks (times out) on large filesystems, and doesn't work with cephfs ReadWriteMany volumes (last relabel wins).
# Disable it here if you have similar issues.
# For more details see https://github.com/rook/rook/issues/2417
enableSelinuxRelabeling: true
enableDiscoveryDaemon: false
# Writing to the hostPath is required for the Ceph mon and osd pods. Given the restricted permissions in OpenShift with SELinux,
# the pod must be running privileged in order to write to the hostPath volume, this must be set to true then.
hostpathRequiresPrivileged: false
image:
  prefix: rook
  repository: docker.io/rook/ceph
  tag: v1.16.6
  pullPolicy: IfNotPresent
# The imagePullSecrets option allows pulling docker images from a private docker registry. It will be passed to all service accounts.
imagePullSecrets:
  - name: default-registry-key
nodeSelector: {node-role.kubernetes.io/control-plane : ""}
pspEnable: false
resources:
  limits:
    memory: 512Mi
  requests:
    cpu: 0
    memory: 0
# Tolerations for the rook-ceph-operator to allow it to run on nodes with particular taints
tolerations:
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
  - key: node-role.kubernetes.io/master
    operator: Exists
    effect: NoSchedule