Rook Ceph upgrade 1.16
Upgrades Rook Ceph to version 1.16.6 and its Ceph version to 18.2.5.
Static overrides were adapted to the new Rook Ceph version.

A new upgrade lifecycle hook now prepares the upgrade process based on
the currently installed version.

The ECBlock pools were renamed to comply with the new upstream
standard. This avoids having to manually patch ECBlock resources on
every subsequent update.

A job that monitors the removal of the CSI pluginholder pods was
added. In Rook Ceph 1.16 the pluginholder pods are deprecated, and, to
avoid draining all nodes during the update, a job that monitors all
PVs is now deployed. As soon as all PVs point to the updated CSI
provisioner, the holder pods can be safely removed.

The CSI readAffinity configuration is transferred to the cephcluster
resource when updating from 1.13 to 1.16, to handle the breaking
change introduced in 1.14.

The update from 1.13 does not yet work on DX platforms due to a
floating-monitor issue. This will be covered in
https://review.opendev.org/c/starlingx/app-rook-ceph/+/948487.

Test Plan:
PASS: Update app with ECBlock on SX and verify the update succeeds
      without data loss
PASS: Update app with holder pods and verify their removal on IPv4
      and IPv6
PASS: Update app with the readAffinity option enabled on the previous
      version
PASS: Update Ceph minor version to 18.2.5
PASS: Fresh install [SX/DX/DX+/STD]
PASS: Update app from the previous version and confirm all daemons
      are updated to the new Ceph/Rook version [SX/DX+/STD]
PASS: Ensure proper operation of services: CephFS, RBD, ECBlock, RGW
      [SX/DX/DX+/STD]

Story: 2011372
Task: 51748

Change-Id: I2684573940ff5bac4709157131c6e6aad9114ade
Signed-off-by: Caio Correa <caio.correa@windriver.com>
Signed-off-by: Ítalo Vieira <italo.gomesvieira@windriver.com>
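For reviewers who want to spot-check the holder-pod retirement and the version bump by hand, a minimal sketch (assumes kubectl access on the active controller and the labels used by this change; the commands are illustrative, not part of the app):

```bash
#!/bin/bash
# Sketch: verify the cluster state the new update path relies on.

# 1) Any CSI pluginholder pods still present? The remove-pluginholder
#    jobs delete their daemonsets once no volume is served through them.
kubectl get pods -n rook-ceph \
  -l 'app in (csi-rbdplugin-holder, csi-cephfsplugin-holder)'

# 2) Confirm every Rook daemon reports the new versions after the update
#    (the same rook-version label the lifecycle code matches on).
kubectl get deployments -n rook-ceph \
  -o custom-columns='NAME:.metadata.name,ROOK:.metadata.labels.rook-version,CEPH:.metadata.labels.ceph-version'
```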
@@ -8,11 +8,11 @@ App-rook-ceph fluxCD app
 │ ├── debian_build_layer.cfg
 │ ├── debian_iso_image.inc
 │ ├── debian_pkg_dirs
 │ ├── python3-k8sapp-rook-ceph # lifecycle management code to support flux apps
 │ ├── README.md
-│ ├── rook-ceph-helm # importing of upstream rook-ceph helm packages
+│ ├── helm-charts # upstream and custom rook-ceph helm packages
 │ ├── requirements.txt
 │ ├── stx-rook-ceph-helm # helm package manager for the app
 │ ├── test-requirements.txt
 │ └── tox.ini
 ```
@@ -1,3 +1,10 @@
+rook-ceph-floating-monitor-helm (2.1-0) unstable; urgency=medium
+
+  * Upversion to 1.16
+
+ -- Caio Correa <caio.correa@windriver.com>  Fri, 02 May 2025 09:45:00 -0300
+
+
 rook-ceph-floating-monitor-helm (1.0-0) unstable; urgency=medium
 
   * Initial release.
@@ -1,10 +1,10 @@
 ---
 debname: rook-ceph-floating-monitor-helm
-debver: 1.0-0
+debver: 2.1-0
 src_path: rook-ceph-floating-monitor-helm
 revision:
   dist: $STX_DIST
   PKG_GITREVCOUNT: true
   GITREVCOUNT:
     SRC_DIR: ${MY_REPO}/stx/app-rook-ceph/helm-charts/custom/rook-ceph-floating-monitor-helm/rook-ceph-floating-monitor-helm/rook-ceph-floating-monitor
-    BASE_SRCREV: c6c693d51cdc6daa4eafe34ccab5ce35496bf516
+    BASE_SRCREV: 78b6f162a3e44a5e6c728661ff2f6ab1ebafce36
@@ -1,6 +1,6 @@
 {{/*
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -18,12 +18,12 @@ metadata:
     app.kubernetes.io/name: ceph-mon
     app.kubernetes.io/part-of: rook-ceph
     app.starlingx.io/component: platform
-    ceph-version: 18.2.2-0
+    ceph-version: 18.2.5-0
    ceph_daemon_id: float
     ceph_daemon_type: mon
     mon: float
     mon_cluster: rook-ceph
-    rook-version: v1.13.7
+    rook-version: v1.16.6
     rook.io/operator-namespace: rook-ceph
     rook_cluster: rook-ceph
   name: rook-ceph-mon-float
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -8,7 +8,7 @@ app.starlingx.io/component: platform
 
 images:
   tags:
-    ceph: quay.io/ceph/ceph:v18.2.2
+    ceph: quay.io/ceph/ceph:v18.2.5
     kubectl: docker.io/bitnami/kubectl:1.29
 
 config:
@@ -1,3 +1,9 @@
+rook-ceph-provisioner-helm (2.1-0) unstable; urgency=medium
+
+  * Upversion to 1.16
+
+ -- Caio Correa <caio.correa@windriver.com>  Fri, 02 May 2025 09:45:00 -0300
+
 rook-ceph-provisioner-helm (2.0-0) unstable; urgency=medium
 
   * Initial release.
@@ -1,10 +1,10 @@
 ---
 debname: rook-ceph-provisioner-helm
-debver: 2.0-0
+debver: 2.1-0
 src_path: rook-ceph-provisioner-helm
 revision:
   dist: $STX_DIST
   PKG_GITREVCOUNT: true
   GITREVCOUNT:
     SRC_DIR: ${MY_REPO}/stx/app-rook-ceph/helm-charts/custom/rook-ceph-provisioner-helm/rook-ceph-provisioner-helm/rook-ceph-provisioner
-    BASE_SRCREV: c6c693d51cdc6daa4eafe34ccab5ce35496bf516
+    BASE_SRCREV: 0dba46b5508b4910860439de303cafec7dc5408d
@@ -1,6 +1,6 @@
 {{/*
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -1,6 +1,6 @@
 {{/*
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -17,7 +17,7 @@ rules:
     resources: ["secrets"]
     verbs: ["get", "create", "list", "update"]
   - apiGroups: [""]
-    resources: ["configmaps"]
+    resources: ["configmaps","pods/exec"]
    verbs: ["get", "create", "list", "update"]
   - apiGroups: [""]
     resources: ["namespaces"]
@@ -38,7 +38,6 @@ images:
   tags:
     ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
-    stx_ceph_manager: docker.io/starlingx/stx-ceph-manager:stx.10.0-v18.2.2-0
     k8s_entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
     kubectl: docker.io/bitnami/kubectl:1.29
@@ -1,3 +1,9 @@
+rook-ceph-helm (1.16-6) unstable; urgency=medium
+
+  * Upgrade to version 1.16-6.
+
+ -- Ítalo Vieira <italo.gomesvieira@windriver.com>  Fri, 26 Feb 2025 08:37:12 +0000
+
 rook-ceph-helm (1.13-7) unstable; urgency=medium
 
   * Initial release.
@@ -1,4 +1,4 @@
-From e225331b54bbeb1c027840fd27e22fd5c2d7bbd8 Mon Sep 17 00:00:00 2001
+From 853d5db8ad9077bf96f46952a30d80f62e1e3c9e Mon Sep 17 00:00:00 2001
 From: Caio Correa <caio.correa@windriver.com>
 Date: Fri, 5 Apr 2024 08:01:17 -0300
 Subject: [PATCH] Add chart for duplex preparation
@@ -8,6 +8,7 @@ rook-ceph-mon. On a duplex this entrypoint should be the floating IP
 to acomplish the roaming mon strategy.
 
 Signed-off-by: Caio Correa <caio.correa@windriver.com>
+Signed-off-by: Ítalo Vieira <italo.gomesvieira@windriver.com>
 ---
  .../pre-install-duplex-preparation.yaml | 82 +++++++++++++++++++
  deploy/charts/rook-ceph-cluster/values.yaml | 18 ++++
@@ -16,7 +17,7 @@ Signed-off-by: Caio Correa <caio.correa@windriver.com>
 
 diff --git a/deploy/charts/rook-ceph-cluster/templates/pre-install-duplex-preparation.yaml b/deploy/charts/rook-ceph-cluster/templates/pre-install-duplex-preparation.yaml
 new file mode 100644
-index 000000000..61e64c87b
+index 000000000..aa276ec55
 --- /dev/null
 +++ b/deploy/charts/rook-ceph-cluster/templates/pre-install-duplex-preparation.yaml
 @@ -0,0 +1,82 @@
@@ -103,16 +104,16 @@ index 000000000..61e64c87b
 + mountPath: /tmp/mount
 +{{- end }}
 diff --git a/deploy/charts/rook-ceph-cluster/values.yaml b/deploy/charts/rook-ceph-cluster/values.yaml
-index 36a79d063..ebd262496 100644
+index b7d2e1d6d..f1965b5a8 100644
 --- a/deploy/charts/rook-ceph-cluster/values.yaml
 +++ b/deploy/charts/rook-ceph-cluster/values.yaml
-@@ -678,3 +678,21 @@ cephObjectStores:
+@@ -698,3 +698,21 @@ cephObjectStores:
  # -- CSI driver name prefix for cephfs, rbd and nfs.
  # @default -- `namespace name where rook-ceph operator is deployed`
  csiDriverNamePrefix:
 +
 +hook:
-+  image: docker.io/openstackhelm/ceph-config-helper:ubuntu_bionic-20220802
++  image: docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
 +  duplexPreparation:
 +    enable: false
 +    activeController: controller-0
@@ -128,6 +129,3 @@ index 36a79d063..ebd262496 100644
 + serviceAccount: rook-ceph-cleanup
 + mon_hosts:
 + - controller-0
 --
 2.34.1
@@ -1,4 +1,4 @@
-From 063c5276ddcf8fbb08cb972bf57e716ef90ea577 Mon Sep 17 00:00:00 2001
+From 26c868ec34d5441e0ec3a9b0998f31676cccc943 Mon Sep 17 00:00:00 2001
 From: Caio Correa <caio.correa@windriver.com>
 Date: Thu, 16 May 2024 16:09:11 -0300
 Subject: [PATCH] Add starlingx label to operator and toolbox pod
@@ -8,16 +8,17 @@ toolbox pod.
 With this label, the pods are scheduled on platform cores.
 
 Signed-off-by: Caio Correa <caio.correa@windriver.com>
+Signed-off-by: Ítalo Vieira <italo.gomesvieira@windriver.com>
 ---
  deploy/charts/rook-ceph-cluster/templates/deployment.yaml | 1 +
  deploy/charts/rook-ceph/templates/deployment.yaml | 1 +
  2 files changed, 2 insertions(+)
 
 diff --git a/deploy/charts/rook-ceph-cluster/templates/deployment.yaml b/deploy/charts/rook-ceph-cluster/templates/deployment.yaml
-index b785da650..592e2f715 100644
+index 63adf22d3..a8e7cd837 100644
 --- a/deploy/charts/rook-ceph-cluster/templates/deployment.yaml
 +++ b/deploy/charts/rook-ceph-cluster/templates/deployment.yaml
-@@ -16,6 +16,7 @@ spec:
+@@ -19,6 +19,7 @@ spec:
    metadata:
      labels:
        app: rook-ceph-tools
@@ -26,17 +27,14 @@ index b785da650..592e2f715 100644
      dnsPolicy: ClusterFirstWithHostNet
      {{- $network := .Values.cephClusterSpec.network | default dict -}}
 diff --git a/deploy/charts/rook-ceph/templates/deployment.yaml b/deploy/charts/rook-ceph/templates/deployment.yaml
-index b9f38d233..7505d6f95 100644
+index cee5d63af..c18c761a0 100644
 --- a/deploy/charts/rook-ceph/templates/deployment.yaml
 +++ b/deploy/charts/rook-ceph/templates/deployment.yaml
-@@ -19,6 +19,7 @@ spec:
+@@ -22,6 +22,7 @@ spec:
      labels:
        app: rook-ceph-operator
        helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}"
 +      app.starlingx.io/component: platform
- {{- if .Values.annotations }}
-   annotations:
- {{ toYaml .Values.annotations | indent 8 }}
+ {{- if .Values.operatorPodLabels }}
+ {{ toYaml .Values.operatorPodLabels | indent 8 }}
+ {{- end }}
 --
 2.34.1
@@ -1,43 +0,0 @@
-From c9afa7f22522b5f81ce6ab46e25e3046f417f92e Mon Sep 17 00:00:00 2001
-From: Caio Correa <caio.correa@windriver.com>
-Date: Tue, 29 Oct 2024 18:05:08 -0300
-Subject: [PATCH] Add annotation for ecblock default storage class
-
-Add annotation for ecblock's storage class be deployed as default storage
-class.
-
-This patch was made adapting an excerpt from a commit from the upstream
-rook ceph repository and can be removed when Rook Ceph is upgrade to chart
-version v1.15.4.
-
-Original commit:
-https://github.com/rook/rook/commit/de82481d805a4da58c6c0c663fa1cea253cdaa3a
-
-Original commit message:
-    helm: allow custom labels and annotations for storage classes
-
-    Storage classes were missing the possilbilty to attach custom labels and
-    annotations. This change brings it on par with VolumeSnapshotClass
-    objects.
-
-Signed-off-by: Caio Correa <caio.correa@windriver.com>
----
- deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml b/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml
-index 85581f5..c4a5114 100644
---- a/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml
-+++ b/deploy/charts/rook-ceph-cluster/templates/cephecblockpool.yaml
-@@ -16,6 +16,8 @@ apiVersion: storage.k8s.io/v1
- kind: StorageClass
- metadata:
-   name: {{ $cephEcStorage.name }}
-+  annotations:
-+    storageclass.kubernetes.io/is-default-class: "{{ if default false $cephEcStorage.isDefault }}true{{ else }}false{{ end }}"
- {{- if $root.Values.csiDriverNamePrefix }}
- provisioner: {{ $root.Values.csiDriverNamePrefix }}.rbd.csi.ceph.com
- {{- else }}
---
-2.34.1
@@ -1,4 +1,3 @@
 0001-Add-chart-for-duplex-preparation.patch
 0002-Add-starlingx-label-to-operator-and-toolbox-pod.patch
 0003-Add-support-ceph-dashboard.patch
-0004-Add-annotation-for-ecblock-default-storage-class.patch
@@ -1,15 +1,16 @@
 ---
 debname: rook-ceph-helm
-debver: 1.13-7
+debver: 1.16-6
 dl_path:
-  name: rook-ceph-1.13.7.tar.gz
-  url: https://github.com/rook/rook/archive/refs/tags/v1.13.7.tar.gz
-  sha256sum: 8595c8029240ad451a845bf3a45d26af4797909009f104191969577fd45ac1fc
+  name: rook-ceph-1.16.6.tar.gz
+  url: https://github.com/rook/rook/archive/refs/tags/v1.16.6.tar.gz
+  sha256sum: 1e804ed2f44f5d37fa8482c8b5e69853c53f0e5a347f00e3476d4e33e57f6ffc
 
 src_files:
   - rook-ceph-helm/files/Makefile
 revision:
   dist: $STX_DIST
-  stx_patch: 0
+  stx_patch: 2
   GITREVCOUNT:
-    BASE_SRCREV: c6c693d51cdc6daa4eafe34ccab5ce35496bf516
+    BASE_SRCREV: 7569f3a4d169566440d9975679caafa65af3eb67
+    SRC_DIR: ${MY_REPO}/stx/app-rook-ceph/helm-charts/upstream/rook-ceph-helm
@@ -1,3 +1,10 @@
+python3-k8sapp-rook-ceph (2.1-0) unstable; urgency=medium
+
+  * Upversion to 1.16
+
+ -- Caio Cesar Correa <caio.correa@windriver.com>  Fri, 02 May 2025 09:45:00 -0300
+
+
 python3-k8sapp-rook-ceph (1.0-1) unstable; urgency=medium
 
   * Initial release.
@@ -1,9 +1,9 @@
 ---
 debname: python3-k8sapp-rook-ceph
-debver: 1.0-1
+debver: 2.1-0
 src_path: k8sapp_rook_ceph
 revision:
   dist: $STX_DIST
   GITREVCOUNT:
     SRC_DIR: ${MY_REPO}/stx/app-rook-ceph
-    BASE_SRCREV: c6c693d51cdc6daa4eafe34ccab5ce35496bf516
+    BASE_SRCREV: 78b6f162a3e44a5e6c728661ff2f6ab1ebafce36
@@ -93,17 +93,16 @@ STORAGE_CLASS_NAMES = {
 }
 
 # pool names
-POOL_NAME_BLOCK = 'kube-rbd'  # For historic alignment. Upstream: ceph-blockpool
-POOL_NAME_ECBLOCK_DATA = 'ec-data-pool'  # Align with upstream. Upstream: ec-data-pool
-POOL_NAME_ECBLOCK_METADATA = 'ec-metadata-pool'  # Align with upstream. Upstream: ec-metadata-pool
-POOL_NAME_FS = 'kube-cephfs'  # For historic alignment. Upstream: ceph-filesystem
-POOL_NAME_RGW = 'ceph-objectstore'  # Align with upstream. Upstream: ceph-objectstore
-POOL_NAME_MGR = 'builtin-mgr'  # Align with upstream. Upstream: builtin-mgr
+POOL_NAME_BLOCK = 'kube-rbd'  # For historic alignment. Upstream: ceph-blockpool
+POOL_NAME_ECBLOCK = 'kube-ecblock'  # Align with upstream. Upstream: ec-pool
+POOL_NAME_ECBLOCK_METADATA = 'kube-ecblock-metadata'  # Align with upstream. Upstream: ec-pool-metadata
+POOL_NAME_FS = 'kube-cephfs'  # For historic alignment. Upstream: ceph-filesystem
+POOL_NAME_RGW = 'ceph-objectstore'  # Align with upstream. Upstream: ceph-objectstore
+POOL_NAME_MGR = 'builtin-mgr'  # Align with upstream. Upstream: builtin-mgr
 
 POOL_NAMES = {
     SVC_BLOCK: POOL_NAME_BLOCK,
-    SVC_ECBLOCK: {'data': POOL_NAME_ECBLOCK_DATA,
-                  'metadata': POOL_NAME_ECBLOCK_METADATA},
+    SVC_ECBLOCK: POOL_NAME_ECBLOCK,
     SVC_FS: POOL_NAME_FS,
     SVC_OBJ: POOL_NAME_RGW,
 }
@@ -154,5 +153,7 @@ ROOK_RGW_NAME = "ceph-objectstore"
 
 ROOK_ECBLOCK_PLURAL = "cephblockpools"
 ROOK_ECBLOCKPOOL_NAME = "ceph-blockpool"
-ROOK_ECBLOCKDATAPOOL_NAME = "ec-data-pool"
-ROOK_ECBLOCKMETADATAPOOL_NAME = "ec-metadata-pool"
+ROOK_ECBLOCKDATAPOOL_NAME = POOL_NAME_ECBLOCK
+ROOK_ECBLOCKMETADATAPOOL_NAME = POOL_NAME_ECBLOCK_METADATA
+
+ROOK_ECBLOCK_PROVISIONER = "rook-ceph.rbd.csi.ceph.com"
@@ -235,55 +235,59 @@ class RookCephClusterHelm(storage.StorageBaseHelm):
 
         return block_config
 
+    def _get_ecblocksc_overrides(self):
+
+        ec_block_sc_config = {
+            "provisioner": app_constants.ROOK_ECBLOCK_PROVISIONER,
+            "enabled": True,
+            "name": app_constants.STORAGE_CLASS_NAMES[app_constants.SVC_ECBLOCK],
+            "isDefault": True,
+            "allowVolumeExpansion": True,
+            "reclaimPolicy": "Delete",
+            'annotations': {
+                'helm.sh/hook': 'post-upgrade, post-install',
+                'helm.sh/hook-delete-policy': 'before-hook-creation',
+            },
+        }
+
+        return ec_block_sc_config
+
     def _get_ecblock_overrides(self):
 
+        parameters = {
+            "clusterID": 'rook-ceph',
+            'imageFormat': '2',
+            'imageFeatures': 'layering',
+        }
+
         pool_parameters = {
             'min_size': str(self._get_data_min_replication_factor())
         }
 
         ec_block_config = [{
-            "name": app_constants.POOL_NAMES[app_constants.SVC_ECBLOCK]['metadata'],
+            "name": app_constants.POOL_NAMES[app_constants.SVC_ECBLOCK],
             "spec": {
-                "failureDomain": self._get_failure_domain(),
-                "parameters": pool_parameters,
-                "replicated": {
-                    "size": self._get_data_replication_factor()
-                }
-            }
-        }, {
-            "name": app_constants.POOL_NAMES[app_constants.SVC_ECBLOCK]['data'],
-            "spec": {
-                "failureDomain": self._get_failure_domain(),
-                "parameters": pool_parameters,
-                "replicated": {
-                    "size": self._get_data_replication_factor()
+                "metadataPool": {
+                    "failureDomain": self._get_failure_domain(),
+                    "parameters": pool_parameters,
+                    "replicated": {
+                        "size": self._get_data_replication_factor()
+                    }
                 },
-                "deviceClass": "hdd"
-            }
+                "dataPool": {
+                    "failureDomain": self._get_failure_domain(),
+                    "parameters": pool_parameters,
+                    "replicated": {
+                        "size": self._get_data_replication_factor()
+                    }
+                },
+            },
+            "parameters": parameters,
+            "storageClass": self._get_ecblocksc_overrides()
         }]
 
         return ec_block_config
 
-    def _get_ecblocksc_overrides(self):
-
-        parameters = {
-            "clusterID": 'rook-ceph',
-            "dataPool": app_constants.POOL_NAME_ECBLOCK_DATA,
-            "pool": app_constants.POOL_NAME_ECBLOCK_METADATA,
-            'imageFormat': '2',
-            'imageFeatures': 'layering',
-        }
-
-        ec_block_sc_config = {
-            "name": app_constants.STORAGE_CLASS_NAMES[app_constants.SVC_ECBLOCK],
-            "isDefault": True,
-            "parameters": parameters,
-            "allowVolumeExpansion": True,
-            "reclaimPolicy": "Delete"
-        }
-
-        return ec_block_sc_config
-
     def _get_rgw_overrides(self):
 
         pool_parameters = {
@@ -403,12 +407,10 @@ class RookCephClusterHelm(storage.StorageBaseHelm):
                     # CephBlockPool CRD and builtin-mgr is not used by a
                     # CephECStorageClass.
                     'cephBlockPools': self._get_builtin_block_overrides(),
-                    'cephECStorageClass': self._get_ecblocksc_overrides(),
                 })
         else:
             overrides[app_constants.HELM_NS_ROOK_CEPH].update(
-                {'cephECStorageClass': [],
-                 'cephECBlockPools': []})
+                {'cephECBlockPools': []})
 
         # Enable optional filesystem store
         if app_utils.is_service_enabled(self.dbapi, app_constants.SVC_FS):
@@ -61,6 +61,8 @@ class RookCephAppLifecycleOperator(base.AppLifecycleOperator):
         if hook_info.operation == constants.APP_REMOVE_OP:
             return self.pre_remove(app_op)
         if hook_info.operation == constants.APP_APPLY_OP:
+            if app.status == constants.APP_UPDATE_IN_PROGRESS:
+                self.prepare_update(app_op)
             return self.pre_apply_fluxcd_checks(app_op)
 
         # Resources
@@ -165,6 +167,115 @@ class RookCephAppLifecycleOperator(base.AppLifecycleOperator):
         LOG.info("Removing ceph alarms")
         self.remove_alarms(app_op)
 
+    def prepare_update(self, app_op):
+        """ Prepare update actions
+
+        :param app_op: AppOperator object
+        :param app: AppOperator.Application object
+        :param hook_info: LifecycleHookInfo object
+
+        """
+        kube_core = client.CoreV1Api()
+        kube_client = client.ApiClient()
+        kube_custom = client.CustomObjectsApi()
+
+        # Pre update actions from v1.13 to v1.16:
+        if self.rook_ceph_version_matches("v1.13.7"):
+
+            # Starting with version v1.14, the ReadAffinity was removed from the helm charts and values.yaml.
+            # To maintain the current working clusters, the previous configuration on this parameter is rewritten
+            # at the new location: at the cephcluster resource.
+            operator_config = self.get_kube_resource(kube_core,
+                                                     "rook-ceph-operator-config",
+                                                     app_constants.HELM_NS_ROOK_CEPH,
+                                                     'config_map')
+            if operator_config is not None:
+                if operator_config.data.get("CSI_ENABLE_READ_AFFINITY") == 'true':
+                    LOG.info("CSI Read Affinity option detected on operator configmap, applying it to cephcluster...")
+                    patch_body = {
+                        "spec": {
+                            "csi": {
+                                "readAffinity": {
+                                    "enabled": True
+                                }
+                            }
+                        }
+                    }
+
+                    try:
+                        kube_custom.patch_namespaced_custom_object(
+                            group='ceph.rook.io',
+                            version='v1',
+                            namespace=app_constants.HELM_NS_ROOK_CEPH,
+                            plural='cephclusters',
+                            name='rook-ceph',
+                            body=patch_body
+                        )
+                        LOG.info("CSI readAffinity configuration applied on cephcluster successfully")
+                    except client.ApiException as e:
+                        LOG.error("Error patching cephcluster: %s" % e)
+
+            # Starting with version v1.15, upstream Rook Ceph modified the
+            # pool names for ecblock. When updating from version v1.13, this
+            # adjustment is necessary to stay aligned with upstream by adopting the
+            # new pool names
+            if app_utils.is_service_enabled(app_op._dbapi, app_constants.SVC_ECBLOCK):
+                LOG.info(
+                    "Renaming old ecblock pool names, required when updating from Rook Ceph v1.13.7"
+                )
+                self.rename_ecblock_pools()
+
+            # Starting with version v1.16, csi pluginholder pods are deprecated
+            # and should be removed from the cluster. The remove-pluginholder jobs
+            # are used to monitor all the csi mounts and as soon as all volumes are
+            # mounted outside pluginholder pods, the daemonsets are deleted
+            if (app_utils.is_service_enabled(app_op._dbapi, app_constants.SVC_BLOCK) or
+                    app_utils.is_service_enabled(app_op._dbapi, app_constants.SVC_FS)):
+                rbac_api = client.RbacAuthorizationV1Api()
+                job_daemonset_patch = {
+                    "rules": [
+                        {
+                            "apiGroups": ["apps"],
+                            "resources": ["daemonsets"],
+                            "verbs": ["get", "list", "delete"]
+                        }
+                    ]
+                }
+                rbac_api.patch_namespaced_role(
+                    name=app_constants.HELM_CHART_ROOK_CEPH_PROVISIONER,
+                    namespace=app_constants.HELM_NS_ROOK_CEPH,
+                    body=job_daemonset_patch
+                )
+
+            if app_utils.is_service_enabled(app_op._dbapi, app_constants.SVC_BLOCK):
+                LOG.info("Creating job to remove pluginholder rbd pods")
+                job_remove_pluginholder_rbd_template = self.read_template("job-remove-pluginholder-rbd.yaml")
+                job_remove_pluginholder_rbd_resource_path = '/tmp/job-remove-pluginholder-rbd.yaml'
+
+                self.create_kube_resource_file(job_remove_pluginholder_rbd_resource_path,
+                                               job_remove_pluginholder_rbd_template.template)
+                # Creating job to remove pluginholder rbd pods
+                try:
+                    utils.create_from_yaml(kube_client, job_remove_pluginholder_rbd_resource_path)
+                except Exception as err:
+                    LOG.error("Exception raised from creating remove pluginholder rbd pods job: %s" % err)
+                    return
+
+            if app_utils.is_service_enabled(app_op._dbapi, app_constants.SVC_FS):
+                LOG.info("Creating job to remove pluginholder cephfs pods")
+                job_remove_pluginholder_cephfs_template = self.read_template("job-remove-pluginholder-cephfs.yaml")
+                job_remove_pluginholder_cephfs_resource_path = '/tmp/job-remove-pluginholder-cephfs.yaml'
+
+                self.create_kube_resource_file(job_remove_pluginholder_cephfs_resource_path,
+                                               job_remove_pluginholder_cephfs_template.template)
+
+                # Creating job to remove pluginholder cephfs pods
+                try:
+                    utils.create_from_yaml(kube_client, job_remove_pluginholder_cephfs_resource_path)
+                except Exception as err:
+                    LOG.error("Exception raised from creating remove pluginholder cephfs pods job: %s" % err)
+                    return
+
     def post_apply(self, app, app_op, hook_info):
         """ Post apply actions
 
@@ -1915,7 +2026,7 @@ class RookCephAppLifecycleOperator(base.AppLifecycleOperator):
           name: rook-data
       containers:
       - name: remove
-        image: registry.local:9001/quay.io/ceph/ceph:v18.2.2
+        image: registry.local:9001/quay.io/ceph/ceph:v18.2.5
         command: ["/bin/bash"]
         args: ["-c", "while [ -d /var/lib/rook/mon-$TARGET_MON ];\
                do rm -rf /var/lib/rook/mon-$TARGET_MON; sleep 1;done"]
@@ -2091,3 +2202,27 @@ class RookCephAppLifecycleOperator(base.AppLifecycleOperator):
         # Deleting yaml files used to create job and config map
         self.delete_kube_resource_file(job_wipe_disks_resource_path)
         self.delete_kube_resource_file(sa_wipe_disks_resource_path)
+
+    def rook_ceph_version_matches(self, version):
+        resources = client.AppsV1Api().list_namespaced_deployment(
+            "rook-ceph",
+            label_selector=f"rook-version={version},app=rook-ceph-mon",
+        )
+        return bool(resources.items)
+
+    def rename_ecblock_pools(self):
+        _, stderr = cutils.trycmd("ceph", "osd", "pool", "rename",
+                                  "ec-data-pool",
+                                  app_constants.POOL_NAME_ECBLOCK,
+                                  discard_warnings=True)
+        if stderr:
+            raise exception.CephPoolCreateFailure(
+                name=app_constants.POOL_NAME_ECBLOCK, reason=stderr)
+
+        _, stderr = cutils.trycmd("ceph", "osd", "pool", "rename",
+                                  "ec-metadata-pool",
+                                  app_constants.POOL_NAME_ECBLOCK_METADATA,
+                                  discard_warnings=True)
+        if stderr:
+            raise exception.CephPoolCreateFailure(
+                name=app_constants.POOL_NAME_ECBLOCK_METADATA, reason=stderr)
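For reference, the preparation steps above reduce to the following manual commands; this is a sketch of what `prepare_update()` automates (run the ceph commands from the toolbox pod), not a supported alternative to it:

```bash
# Re-apply CSI read affinity at its new post-v1.14 location on the
# CephCluster CR (mirrors the patch_namespaced_custom_object() call above).
kubectl -n rook-ceph patch cephcluster rook-ceph --type merge \
  -p '{"spec":{"csi":{"readAffinity":{"enabled":true}}}}'

# Adopt the upstream-aligned ecblock pool names
# (mirrors rename_ecblock_pools()).
ceph osd pool rename ec-data-pool kube-ecblock
ceph osd pool rename ec-metadata-pool kube-ecblock-metadata
```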
@@ -0,0 +1,130 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rook-remove-pluginholder-cephfs-bin
+  namespace: rook-ceph
+data:
+  remove_pluginholder_cephfs.sh: |-
+    #!/bin/bash
+    while true; do
+      IS_THERE_HOLDER_MOUNT=0
+
+      ADDRESS_PVC=$(ceph tell "mds.*" client ls --format json 2> /dev/null | sed 's/^[^:]*: //' | jq -r '.[].entity.addr.addr' | cut -d ':' -f 1)
+      ADDRESS_HOLDER=$(kubectl get pod -l app=csi-cephfsplugin-holder -n rook-ceph -o jsonpath='{.items[*].status.podIP}')
+
+      for holder in $ADDRESS_HOLDER; do
+
+        while read pod; do
+          if [ "$pod" == "$holder" ]; then
+            IS_THERE_HOLDER_MOUNT=1
+          fi
+        done <<< "$ADDRESS_PVC"
+
+      done
+
+      if [ "$IS_THERE_HOLDER_MOUNT" == "0" ]; then
+        echo "Safe to remove cephfs pluginholder daemonset"
+        kubectl delete daemonset -n rook-ceph csi-cephfsplugin-holder-rook-ceph
+        break
+      else
+        echo "Some pods are still attached to the cephfs pluginholder, cannot delete daemonset"
+      fi
+
+      sleep 1d
+    done
+
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  annotations:
+    helm.sh/hook: post-install, post-upgrade, post-rollback
+    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
+    meta.helm.sh/release-namespace: rook-ceph
+  generation: 1
+  labels:
+    app.starlingx.io/component: platform
+    chart: rook-ceph-provisioner
+    heritage: Helm
+    release: rook-ceph-provisioner
+  name: rook-remove-pluginholder-cephfs
+  namespace: rook-ceph
+spec:
+  backoffLimit: 5
+  completionMode: NonIndexed
+  completions: 1
+  manualSelector: false
+  parallelism: 1
+  podReplacementPolicy: TerminatingOrFailed
+  suspend: false
+  template:
+    metadata:
+      creationTimestamp: null
+      labels:
+        app.starlingx.io/component: platform
+      name: ceph-remove-pluginholder-cephfs
+      namespace: rook-ceph
+    spec:
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      containers:
+      - command:
+        - /bin/bash
+        - /tmp/mount/pluginholder/remove_pluginholder_cephfs.sh
+        image: registry.local:9001/docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
+        imagePullPolicy: IfNotPresent
+        name: remove-pluginholder
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /tmp/mount/pluginholder
+          name: rook-remove-pluginholder-cephfs-bin
+        - name: ceph-config
+          mountPath: /etc/ceph
+        - name: config-key-provision
+          mountPath: /tmp/mount
+      dnsPolicy: ClusterFirst
+      initContainers:
+      - name: init
+        image: registry.local:9001/docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
+        command: [ "/bin/bash", "/tmp/mount/provision.sh" ]
+        env:
+        - name: ADMIN_KEYRING
+          valueFrom:
+            secretKeyRef:
+              name: rook-ceph-admin-keyring
+              key: keyring
+        - name: ROOK_MONS
+          valueFrom:
+            configMapKeyRef:
+              name: rook-ceph-mon-endpoints
+              key: data
+        volumeMounts:
+        - name: ceph-config
+          mountPath: /etc/ceph
+        - name: config-key-provision
+          mountPath: /tmp/mount
+      restartPolicy: OnFailure
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: rook-ceph-provisioner
+      serviceAccountName: rook-ceph-provisioner
+      terminationGracePeriodSeconds: 30
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+        operator: Exists
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane
+        operator: Exists
+      volumes:
+      - configMap:
+          name: rook-remove-pluginholder-cephfs-bin
+        name: rook-remove-pluginholder-cephfs-bin
+      - name: config-key-provision
+        configMap:
+          name: ceph-key-init-bin
+      - name: ceph-config
+        emptyDir: {}
@@ -0,0 +1,131 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: rook-remove-pluginholder-rbd-bin
+  namespace: rook-ceph
+data:
+  remove_pluginholder_rbd.sh: |-
+    #!/bin/bash
+    while true; do
+      IS_THERE_HOLDER_MOUNT=0
+
+      ADDRESS_PVC=$(rbd ls --pool kube-rbd)
+      ADDRESS_HOLDER=$(kubectl get pod -l app=csi-rbdplugin-holder -n rook-ceph -o jsonpath='{.items[*].status.podIP}')
+
+      for holder in $ADDRESS_HOLDER; do
+
+        while read image; do
+          pod=$(rbd status --pool kube-rbd $image --format json | jq -r '.watchers[].address' | awk -F: 'OFS=":" {NF--; print $0}' | tr -d '[,]')
+          if [ "$pod" == "$holder" ]; then
+            IS_THERE_HOLDER_MOUNT=1
+          fi
+        done <<< "$ADDRESS_PVC"
+
+      done
+
+      if [ "$IS_THERE_HOLDER_MOUNT" == "0" ]; then
+        echo "Safe to remove rbd pluginholder daemonset"
+        kubectl delete daemonset -n rook-ceph csi-rbdplugin-holder-rook-ceph
+        break
+      else
+        echo "Some pods are still attached to the rbd pluginholder, cannot delete daemonset"
+      fi
+
+      sleep 1d
+
+    done
+
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  annotations:
+    helm.sh/hook: post-install, post-upgrade, post-rollback
+    helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded
+    meta.helm.sh/release-namespace: rook-ceph
+  generation: 1
+  labels:
+    app.starlingx.io/component: platform
+    chart: rook-ceph-provisioner
+    heritage: Helm
+    release: rook-ceph-provisioner
+  name: rook-remove-pluginholder-rbd
+  namespace: rook-ceph
+spec:
+  backoffLimit: 5
+  completionMode: NonIndexed
+  completions: 1
+  manualSelector: false
+  parallelism: 1
+  podReplacementPolicy: TerminatingOrFailed
+  suspend: false
+  template:
+    metadata:
+      labels:
+        app.starlingx.io/component: platform
+      name: ceph-remove-pluginholder-rbd
+      namespace: rook-ceph
+    spec:
+      nodeSelector:
+        node-role.kubernetes.io/control-plane: ""
+      containers:
+      - command:
+        - /bin/bash
+        - /tmp/mount/pluginholder/remove_pluginholder_rbd.sh
+        image: registry.local:9001/docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
+        imagePullPolicy: IfNotPresent
+        name: remove-pluginholder
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+        terminationMessagePolicy: File
+        volumeMounts:
+        - mountPath: /tmp/mount/pluginholder
+          name: rook-remove-pluginholder-rbd-bin
+        - name: ceph-config
+          mountPath: /etc/ceph
+        - name: config-key-provision
+          mountPath: /tmp/mount
+      dnsPolicy: ClusterFirst
+      initContainers:
+      - name: init
+        image: registry.local:9001/docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
+        command: [ "/bin/bash", "/tmp/mount/provision.sh" ]
+        env:
+        - name: ADMIN_KEYRING
+          valueFrom:
+            secretKeyRef:
+              name: rook-ceph-admin-keyring
+              key: keyring
+        - name: ROOK_MONS
+          valueFrom:
+            configMapKeyRef:
+              name: rook-ceph-mon-endpoints
+              key: data
+        volumeMounts:
+        - name: ceph-config
+          mountPath: /etc/ceph
+        - name: config-key-provision
+          mountPath: /tmp/mount
+      restartPolicy: OnFailure
+      schedulerName: default-scheduler
+      securityContext: {}
+      serviceAccount: rook-ceph-provisioner
+      serviceAccountName: rook-ceph-provisioner
+      terminationGracePeriodSeconds: 30
+      tolerations:
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/master
+        operator: Exists
+      - effect: NoSchedule
+        key: node-role.kubernetes.io/control-plane
+        operator: Exists
+      volumes:
+      - configMap:
+          name: rook-remove-pluginholder-rbd-bin
+        name: rook-remove-pluginholder-rbd-bin
+      - name: config-key-provision
+        configMap:
+          name: ceph-key-init-bin
+      - name: ceph-config
+        emptyDir: {}
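Both removal jobs enforce the same invariant: no holder pod IP may still appear as a client/watcher of a served volume. A hand-run version of the RBD probe, useful for debugging (a sketch; assumes toolbox access and the `kube-rbd` pool used by this app):

```bash
#!/bin/bash
# IPs of any remaining rbd holder pods.
HOLDERS=$(kubectl get pod -l app=csi-rbdplugin-holder -n rook-ceph \
  -o jsonpath='{.items[*].status.podIP}')

# Watcher addresses for every image in the pool; any overlap with
# $HOLDERS means a volume is still mounted through a holder pod.
for image in $(rbd ls --pool kube-rbd); do
  rbd status --pool kube-rbd "$image" --format json | jq -r '.watchers[].address'
done
echo "Holder pod IPs: $HOLDERS"
```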
@@ -30,7 +30,7 @@ spec:
           name: device
       containers:
       - name: remove
-        image: registry.local:9001/quay.io/ceph/ceph:v18.2.2
+        image: registry.local:9001/quay.io/ceph/ceph:v18.2.5
         command: ["/bin/bash"]
         args: [
           "-c",
@@ -1,3 +1,9 @@
+stx-rook-ceph-helm (2.1-0) unstable; urgency=medium
+
+  * Upversion to 1.16
+
+ -- Caio Cesar Correa <caio.correa@windriver.com>  Fri, 02 May 2025 09:45:00 -0300
+
 stx-rook-ceph-helm (2.0-0) unstable; urgency=medium
 
   * Initial release.
@@ -1,6 +1,6 @@
 ---
 debname: stx-rook-ceph-helm
-debver: 2.0-0
+debver: 2.1-0
 src_path: stx-rook-ceph-helm
 src_files:
   - files
@@ -8,4 +8,4 @@ revision:
   dist: $STX_DIST
   GITREVCOUNT:
     SRC_DIR: ${MY_REPO}/stx/app-rook-ceph
-    BASE_SRCREV: c6c693d51cdc6daa4eafe34ccab5ce35496bf516
+    BASE_SRCREV: 78b6f162a3e44a5e6c728661ff2f6ab1ebafce36
@@ -72,3 +72,15 @@ cephBlockPools:
       imageFormat: '2'
       reclaimPolicy: Delete
       volumeBindingMode: Immediate
+
+# -- Settings for the block pool snapshot class
+# @default -- See [RBD Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#rbd-snapshots)
+cephBlockPoolsVolumeSnapshotClass:
+  enabled: true
+  name: rbd-snapshot
+  isDefault: false
+  deletionPolicy: Delete
+  annotations: {}
+  labels: {}
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#rbd-snapshots for available configuration
+  parameters: {}
@@ -6,31 +6,51 @@
 # NOTE: keep alphabetical so can easily compare with runtime env
 
 cephECBlockPools:
-  # For erasure coded a replicated metadata pool is required.
-  # https://rook.io/docs/rook/latest/CRDs/Shared-Filesystem/ceph-filesystem-crd/#erasure-coded
-  - name: ec-metadata-pool
-    # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Block-Storage/ceph-block-pool-crd.md#spec for available configuration
-    spec:
-      failureDomain: host
-      replicated:
-        size: 2
-  - name: ec-data-pool
-    spec:
-      failureDomain: host
-      replicated:
-        size: 2
-      deviceClass: hdd
+  - name: ec-pool
+    spec:
+      metadataPool:
+        replicated:
+          size: 2
+      dataPool:
+        failureDomain: osd
+        replicated:
+          size: 2
+        deviceClass: hdd
 
-# cephECStorageClass also is disabled by default, please remove the comments and set desired values to enable it
-# if cephECBlockPools are uncommented you must remove the comments of cephEcStorageClass as well
-cephECStorageClass:
-  name: rook-ceph-block
-  parameters:
-    clusterID: rook-ceph
-    dataPool: ec-data-pool
-    pool: ec-metadata-pool
-    imageFormat: "2"
-    imageFeatures: layering
-  allowVolumeExpansion: true
-  reclaimPolicy: Delete
+    parameters:
+      # clusterID is the namespace where the rook cluster is running
+      # If you change this namespace, also change the namespace below where the secret namespaces are defined
+      clusterID: rook-ceph # namespace:cluster
+      # (optional) mapOptions is a comma-separated list of map options.
+      # For krbd options refer
+      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+      # For nbd options refer
+      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+      # mapOptions: lock_on_read,queue_depth=1024
+
+      # (optional) unmapOptions is a comma-separated list of unmap options.
+      # For krbd options refer
+      # https://docs.ceph.com/docs/latest/man/8/rbd/#kernel-rbd-krbd-options
+      # For nbd options refer
+      # https://docs.ceph.com/docs/latest/man/8/rbd-nbd/#options
+      # unmapOptions: force
+
+      # RBD image format. Defaults to "2".
+      imageFormat: "2"
+
+      # RBD image features, equivalent to OR'd bitfield value: 63
+      # Available for imageFormat: "2". Older releases of CSI RBD
+      # support only the `layering` feature. The Linux kernel (KRBD) supports the
+      # full feature complement as of 5.4
+      # imageFeatures: layering,fast-diff,object-map,deep-flatten,exclusive-lock
+      imageFeatures: layering
+
+    storageClass:
+      provisioner: rook-ceph.rbd.csi.ceph.com # csi-provisioner-name
+      enabled: true
+      name: rook-ceph-block
+      isDefault: false
+      annotations: { }
+      labels: { }
+      allowVolumeExpansion: true
+      reclaimPolicy: Delete
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -69,5 +69,14 @@ cephFileSystems:
         # in hyperconverged settings where the volume is mounted on the same node as the osds.
         csi.storage.k8s.io/fstype: ext4
 
+# -- Settings for the filesystem snapshot class
+# @default -- See [CephFS Snapshots](../Storage-Configuration/Ceph-CSI/ceph-csi-snapshot.md#cephfs-snapshots)
+cephFileSystemVolumeSnapshotClass:
+  enabled: true
+  name: cephfs-snapshot
+  isDefault: true
+  deletionPolicy: Delete
+  annotations: {}
+  labels: {}
+  # see https://rook.io/docs/rook/v1.10/Storage-Configuration/Ceph-CSI/ceph-csi-snapshot/#cephfs-snapshots for available configuration
+  parameters: {}
@@ -34,7 +34,7 @@ cephClusterSpec:
       mds_session_blocklist_on_evict: "false"
   dataDirHostPath: /var/lib/ceph/data
   cephVersion:
-    image: quay.io/ceph/ceph:v18.2.2
+    image: quay.io/ceph/ceph:v18.2.5
     allowUnsupported: true
   network:
     connections:
@@ -221,7 +221,7 @@ cephClusterSpec:
 
 toolbox:
   enabled: true
-  image: quay.io/ceph/ceph:v18.2.2
+  image: quay.io/ceph/ceph:v18.2.5
   tolerations:
   - effect: NoSchedule
     operator: Exists
@@ -271,4 +271,4 @@ hook:
   roleBinding: rook-ceph-cleanup
   serviceAccount: rook-ceph-cleanup
   mon_hosts:
-  - controller-0
+  - controller
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -7,5 +7,5 @@
 
 images:
   tags:
-    ceph: quay.io/ceph/ceph:v18.2.2
+    ceph: quay.io/ceph/ceph:v18.2.5
     kubectl: docker.io/bitnami/kubectl:1.29
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -77,7 +77,6 @@ imagePullSecrets:
 images:
   tags:
     ceph_config_helper: docker.io/openstackhelm/ceph-config-helper:ubuntu_jammy_18.2.2-1-20240312
     k8s_entrypoint: quay.io/airshipit/kubernetes-entrypoint:v1.0.0
-    stx_ceph_manager: docker.io/starlingx/stx-ceph-manager:stx.10.0-v18.2.2-0
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2024 Wind River Systems, Inc.
+# Copyright (c) 2024-2025 Wind River Systems, Inc.
 #
 # SPDX-License-Identifier: Apache-2.0
 #
@@ -29,9 +29,6 @@ crds:
   enabled: true
 
 csi:
-  attacher:
-    # -- Kubernetes CSI Attacher image
-    image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0
 
   # -- Whether to skip any attach operation altogether for CephFS PVCs. See more details
   # [here](https://kubernetes-csi.github.io/docs/skip-attach.html#skip-attach-with-csi-driver-object).
@@ -46,7 +43,40 @@ csi:
 
   cephcsi:
     # -- Ceph CSI image
-    image: quay.io/cephcsi/cephcsi:v3.10.2
+    repository: quay.io/cephcsi/cephcsi
+    # -- Ceph CSI image tag
+    tag: v3.13.1
+
+  registrar:
+    # -- Kubernetes CSI registrar image repository
+    repository: registry.k8s.io/sig-storage/csi-node-driver-registrar
+    # -- Registrar image tag
+    tag: v2.13.0
+
+  provisioner:
+    # -- Kubernetes CSI provisioner image repository
+    repository: registry.k8s.io/sig-storage/csi-provisioner
+    # -- Provisioner image tag
+    tag: v5.1.0
+
+  snapshotter:
+    # -- Kubernetes CSI snapshotter image repository
+    repository: registry.k8s.io/sig-storage/csi-snapshotter
+    # -- Snapshotter image tag
+    tag: v8.2.0
+
+  attacher:
+    # -- Kubernetes CSI Attacher image repository
+    repository: registry.k8s.io/sig-storage/csi-attacher
+    # -- Attacher image tag
+    tag: v4.8.0
+
+  resizer:
+    # -- Kubernetes CSI resizer image repository
+    repository: registry.k8s.io/sig-storage/csi-resizer
+    # -- Resizer image tag
+    tag: v1.13.1
 
   # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
   cephfsPodLabels: app.starlingx.io/component=platform
@@ -176,7 +206,7 @@ csi:
   # -- Enable host networking for CSI CephFS and RBD nodeplugins. This may be necessary
   # in some network configurations where the SDN does not provide access to an external cluster or
   # there is significant drop in read/write performance
-  enableCSIHostNetwork: false
+  enableCSIHostNetwork: true
   # -- Enable Ceph CSI CephFS driver
   enableCephfsDriver: true
   # -- Enable Snapshotter in CephFS provisioner pod
@@ -207,10 +237,6 @@ csi:
   pluginTolerations:
     - operator: "Exists"
 
-  provisioner:
-    # -- Kubernetes CSI provisioner image
-    image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0
-
   # -- PriorityClassName to be set on csi driver provisioner pods
   provisionerPriorityClassName: system-cluster-critical
 
@@ -232,19 +258,6 @@ csi:
   # -- Labels to add to the CSI CephFS Deployments and DaemonSets Pods
   rbdPodLabels: "app.starlingx.io/component=platform"
 
-  registrar:
-    # -- Kubernetes CSI registrar image
-    image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0
-
-  resizer:
-    # -- Kubernetes CSI resizer image
-    image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0
-
-  snapshotter:
-    # -- Kubernetes CSI snapshotter image
-    image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1
-
 
 currentNamespaceOnly: false
 
 # Disable automatic orchestration when new devices are discovered.
@@ -264,7 +277,7 @@ hostpathRequiresPrivileged: false
 image:
   prefix: rook
   repository: docker.io/rook/ceph
-  tag: v1.13.7
+  tag: v1.16.6
   pullPolicy: IfNotPresent
 
 # imagePullSecrets option allow to pull docker images from private docker registry. Option will be passed to all service accounts.
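Since the CSI images are now split into repository/tag pairs, a quick post-update check that the new sidecar tags actually rolled out (a sketch; `csi-rbdplugin-provisioner` is the stock Rook CSI provisioner deployment name, not something introduced by this change):

```bash
# Images currently run by the RBD CSI provisioner; expect the v5.1.0
# provisioner, v8.2.0 snapshotter, v4.8.0 attacher, v1.13.1 resizer and
# cephcsi v3.13.1 listed above.
kubectl -n rook-ceph get deploy csi-rbdplugin-provisioner \
  -o jsonpath='{.spec.template.spec.containers[*].image}' | tr ' ' '\n'
```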