---
# Rook CephCluster: 3-mon cluster with OSDs backed by TopoLVM PVCs,
# host networking, and rack-aware OSD spreading.
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  dataDirHostPath: /var/lib/rook
  mon:
    count: 3
    allowMultiplePerNode: false
    # mon data is stored on a TopoLVM-provisioned PVC instead of hostPath
    volumeClaimTemplate:
      spec:
        storageClassName: topolvm-provisioner
        resources:
          requests:
            storage: 3Gi
  cephVersion:
    image: ceph/ceph:v15.2.4
    allowUnsupported: false
  skipUpgradeChecks: false
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  mgr:
    modules:
      - name: pg_autoscaler
        enabled: true
  dashboard:
    enabled: true
    ssl: true
  network:
    provider: host  # since Rook 1.3, set this to use host networking
  crashCollector:
    disable: false
  storage:
    storageClassDeviceSets:
      - name: topolvm-ssd-cluster
        count: 6
        portable: false
        tuneSlowDeviceClass: true
        encrypted: false
        placement:
          # soft anti-affinity: prefer not to co-locate OSD pods on one node
          podAntiAffinity:
            preferredDuringSchedulingIgnoredDuringExecution:
              - weight: 100
                podAffinityTerm:
                  labelSelector:
                    matchExpressions:
                      - key: app
                        operator: In
                        values:
                          - rook-ceph-osd
                      - key: app
                        operator: In
                        values:
                          - rook-ceph-osd-prepare
                  topologyKey: kubernetes.io/hostname
          topologySpreadConstraints:
            # spread OSDs evenly across racks
            - maxSkew: 1
              topologyKey: topology.rook.io/rack
              whenUnsatisfiable: DoNotSchedule
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-osd
                      - rook-ceph-osd-prepare
            # spread OSDs evenly across nodes within a rack
            # (no effect here since each rack holds a single node)
            - maxSkew: 1
              topologyKey: kubernetes.io/hostname
              whenUnsatisfiable: DoNotSchedule
              labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - rook-ceph-osd
                      - rook-ceph-osd-prepare
        # no explicit CPU/memory requests or limits for the OSD pods
        resources: {}
        volumeClaimTemplates:
          - metadata:
              name: data
            spec:
              resources:
                requests:
                  storage: 10Gi
              storageClassName: topolvm-provisioner
              volumeMode: Block  # OSDs consume the PVC as a raw block device
              accessModes:
                - ReadWriteOnce
  disruptionManagement:
    managePodBudgets: false
    osdMaintenanceTimeout: 30
    manageMachineDisruptionBudgets: false
    machineDisruptionBudgetNamespace: openshift-machine-api