5.17. ハブクラスターのリファレンス設定 CR
以下は、4.19 の通信事業者管理ハブリファレンス設定の全カスタムリソース (CR) の完全な YAML リファレンスです。
5.17.1. RHACM のリファレンス YAML
acmAgentServiceConfig.yaml
---
apiVersion: agent-install.openshift.io/v1beta1
kind: AgentServiceConfig
metadata:
name: agent
annotations:
argocd.argoproj.io/sync-wave: "7"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
databaseStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
filesystemStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
imageStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
mirrorRegistryRef:
name: mirror-registry-config
osImages:
# Replace <http-server-address:port> with the address of the local web server that stores the RHCOS images.
# The images can be downloaded from "https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/".
- cpuArchitecture: "x86_64"
openshiftVersion: "4.17"
rootFSUrl: http://<http-server-address:port>/rhcos-4.17.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.17.0-x86_64-live.x86_64.iso
version: "417.94.202409121747-0"
- cpuArchitecture: "x86_64"
openshiftVersion: "4.18"
rootFSUrl: http://<http-server-address:port>/rhcos-4.18.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.18.0-x86_64-live.x86_64.iso
version: "418.94.202502100215-0"
- cpuArchitecture: "x86_64"
openshiftVersion: "4.19"
rootFSUrl: http://<http-server-address:port>/rhcos-4.19.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.19.0-x86_64-live-iso.x86_64.iso
version: "9.6.20250530-0"
---
apiVersion: agent-install.openshift.io/v1beta1
kind: AgentServiceConfig
metadata:
name: agent
annotations:
argocd.argoproj.io/sync-wave: "7"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
databaseStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
filesystemStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
imageStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
mirrorRegistryRef:
name: mirror-registry-config
osImages:
# Replace <http-server-address:port> with the address of the local web server that stores the RHCOS images.
# The images can be downloaded from "https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/".
- cpuArchitecture: "x86_64"
openshiftVersion: "4.17"
rootFSUrl: http://<http-server-address:port>/rhcos-4.17.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.17.0-x86_64-live.x86_64.iso
version: "417.94.202409121747-0"
- cpuArchitecture: "x86_64"
openshiftVersion: "4.18"
rootFSUrl: http://<http-server-address:port>/rhcos-4.18.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.18.0-x86_64-live.x86_64.iso
version: "418.94.202502100215-0"
- cpuArchitecture: "x86_64"
openshiftVersion: "4.19"
rootFSUrl: http://<http-server-address:port>/rhcos-4.19.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.19.0-x86_64-live-iso.x86_64.iso
version: "9.6.20250530-0"
acmMCE.yaml
apiVersion: multicluster.openshift.io/v1
kind: MultiClusterEngine
metadata:
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: multiclusterengine
spec:
availabilityConfig: High
overrides:
components:
- configOverrides: {}
enabled: true
name: local-cluster
- configOverrides: {}
enabled: true
name: assisted-service
- configOverrides: {}
enabled: true
name: cluster-lifecycle
- configOverrides: {}
enabled: true
name: cluster-manager
- configOverrides: {}
enabled: true
name: discovery
- configOverrides: {}
enabled: true
name: hive
- configOverrides: {}
enabled: true
name: server-foundation
- configOverrides: {}
enabled: true
name: cluster-proxy-addon
- configOverrides: {}
enabled: true
name: hypershift-local-hosting
- configOverrides: {}
enabled: true
name: hypershift
- configOverrides: {}
enabled: true
name: managedserviceaccount
- configOverrides: {}
enabled: false
name: cluster-api-preview
- configOverrides: {}
enabled: false
name: cluster-api-provider-aws-preview
- configOverrides: {}
enabled: true
name: image-based-install-operator
- configOverrides: {}
enabled: true
name: console-mce
targetNamespace: multicluster-engine
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/infra
operator: Exists
apiVersion: multicluster.openshift.io/v1
kind: MultiClusterEngine
metadata:
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: multiclusterengine
spec:
availabilityConfig: High
overrides:
components:
- configOverrides: {}
enabled: true
name: local-cluster
- configOverrides: {}
enabled: true
name: assisted-service
- configOverrides: {}
enabled: true
name: cluster-lifecycle
- configOverrides: {}
enabled: true
name: cluster-manager
- configOverrides: {}
enabled: true
name: discovery
- configOverrides: {}
enabled: true
name: hive
- configOverrides: {}
enabled: true
name: server-foundation
- configOverrides: {}
enabled: true
name: cluster-proxy-addon
- configOverrides: {}
enabled: true
name: hypershift-local-hosting
- configOverrides: {}
enabled: true
name: hypershift
- configOverrides: {}
enabled: true
name: managedserviceaccount
- configOverrides: {}
enabled: false
name: cluster-api-preview
- configOverrides: {}
enabled: false
name: cluster-api-provider-aws-preview
- configOverrides: {}
enabled: true
name: image-based-install-operator
- configOverrides: {}
enabled: true
name: console-mce
targetNamespace: multicluster-engine
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/infra
operator: Exists
acmMCH.yaml
---
apiVersion: operator.open-cluster-management.io/v1
kind: MultiClusterHub
metadata:
annotations:
argocd.argoproj.io/sync-wave: "4"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
installer.open-cluster-management.io/mce-subscription-spec: '{"source": "redhat-operators-disconnected", "installPlanApproval": "Automatic"}'
installer.open-cluster-management.io/oadp-subscription-spec: '{"source": "redhat-operators-disconnected", "installPlanApproval": "Automatic"}'
name: multiclusterhub
namespace: open-cluster-management
spec:
availabilityConfig: High
enableClusterBackup: false
ingress: {}
overrides:
components:
- configOverrides: {}
enabled: true
name: app-lifecycle
- configOverrides: {}
enabled: true
name: cluster-lifecycle
- configOverrides: {}
enabled: true
name: cluster-permission
- configOverrides: {}
enabled: true
name: console
- configOverrides: {}
enabled: true
name: grc
- configOverrides: {}
enabled: true
name: insights
- configOverrides: {}
enabled: true
name: multicluster-engine
- configOverrides: {}
enabled: true
name: multicluster-observability
- configOverrides: {}
enabled: true
name: search
- configOverrides: {}
enabled: true
name: submariner-addon
- configOverrides: {}
enabled: true
name: volsync
- configOverrides: {}
enabled: true
name: cluster-backup
- configOverrides: {}
enabled: true
name: siteconfig
- configOverrides: {}
enabled: false
name: edge-manager-preview
separateCertificateManagement: false
---
apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "4"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: global
namespace: openshift-storage
spec:
clusterSet: global
---
apiVersion: operator.open-cluster-management.io/v1
kind: MultiClusterHub
metadata:
annotations:
argocd.argoproj.io/sync-wave: "4"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
installer.open-cluster-management.io/mce-subscription-spec: '{"source": "redhat-operators-disconnected", "installPlanApproval": "Automatic"}'
installer.open-cluster-management.io/oadp-subscription-spec: '{"source": "redhat-operators-disconnected", "installPlanApproval": "Automatic"}'
name: multiclusterhub
namespace: open-cluster-management
spec:
availabilityConfig: High
enableClusterBackup: false
ingress: {}
overrides:
components:
- configOverrides: {}
enabled: true
name: app-lifecycle
- configOverrides: {}
enabled: true
name: cluster-lifecycle
- configOverrides: {}
enabled: true
name: cluster-permission
- configOverrides: {}
enabled: true
name: console
- configOverrides: {}
enabled: true
name: grc
- configOverrides: {}
enabled: true
name: insights
- configOverrides: {}
enabled: true
name: multicluster-engine
- configOverrides: {}
enabled: true
name: multicluster-observability
- configOverrides: {}
enabled: true
name: search
- configOverrides: {}
enabled: true
name: submariner-addon
- configOverrides: {}
enabled: true
name: volsync
- configOverrides: {}
enabled: true
name: cluster-backup
- configOverrides: {}
enabled: true
name: siteconfig
- configOverrides: {}
enabled: false
name: edge-manager-preview
separateCertificateManagement: false
---
apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "4"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: global
namespace: openshift-storage
spec:
clusterSet: global
acmMirrorRegistryCM.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mirror-registry-config
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
namespace: multicluster-engine
labels:
app: assisted-service
data:
# Add the mirror registry SSL certificate chain up to the CA itself.
ca-bundle.crt: |
-----BEGIN CERTIFICATE-----
MIID7jCCAtagAwXXX...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDvTCCAqWgAwXXX...
-----END CERTIFICATE-----
# The registries.conf field has been populated using the registries.conf file found in "/etc/containers/registries.conf" on each node.
# Replace <registry.example.com:8443> with the mirror registry's address.
registries.conf: |
unqualified-search-registries = ["registry.access.redhat.com", "docker.io"]
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev/ocp-release"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev/ocp-release"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev/ocp-v4.0-art-dev"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/multicluster-engine"
[[registry.mirror]]
location = "<registry.example.com:8443>/multicluster-engine"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/odf4"
[[registry.mirror]]
location = "<registry.example.com:8443>/odf4"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/openshift4"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift4"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhacm2"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhacm2"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhceph"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhceph"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhel8"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhel8"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhel9"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhel9"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/ubi8"
[[registry.mirror]]
location = "<registry.example.com:8443>/ubi8"
pull-from-mirror = "tag-only"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mirror-registry-config
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
namespace: multicluster-engine
labels:
app: assisted-service
data:
# Add the mirror registry SSL certificate chain up to the CA itself.
ca-bundle.crt: |
-----BEGIN CERTIFICATE-----
MIID7jCCAtagAwXXX...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDvTCCAqWgAwXXX...
-----END CERTIFICATE-----
# The registries.conf field has been populated using the registries.conf file found in "/etc/containers/registries.conf" on each node.
# Replace <registry.example.com:8443> with the mirror registry's address.
registries.conf: |
unqualified-search-registries = ["registry.access.redhat.com", "docker.io"]
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev/ocp-release"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev/ocp-release"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev/ocp-v4.0-art-dev"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/multicluster-engine"
[[registry.mirror]]
location = "<registry.example.com:8443>/multicluster-engine"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/odf4"
[[registry.mirror]]
location = "<registry.example.com:8443>/odf4"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/openshift4"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift4"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhacm2"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhacm2"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhceph"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhceph"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhel8"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhel8"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhel9"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhel9"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/ubi8"
[[registry.mirror]]
location = "<registry.example.com:8443>/ubi8"
pull-from-mirror = "tag-only"
acmNS.yaml
---
# Namespace for the Red Hat Advanced Cluster Management (RHACM) operator.
# The cluster-monitoring label enables OpenShift platform monitoring for it.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    openshift.io/cluster-monitoring: "true"
  name: open-cluster-management
---
apiVersion: v1
kind: Namespace
metadata:
  labels:
    openshift.io/cluster-monitoring: "true"
  name: open-cluster-management
acmOperGroup.yaml
---
# OperatorGroup scoping the RHACM operator to its own namespace.
# NOTE(review): this document was collapsed onto a single line (invalid YAML);
# reconstructed into block style with standard 2-space indentation.
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: open-cluster-management-group
  namespace: open-cluster-management
spec:
  targetNamespaces:
    - open-cluster-management
---
# OperatorGroup scoping the RHACM operator to its own namespace.
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: open-cluster-management-group
  namespace: open-cluster-management
spec:
  targetNamespaces:
    - open-cluster-management
acmPerfSearch.yaml
---
apiVersion: search.open-cluster-management.io/v1alpha1
kind: Search
metadata:
name: search-v2-operator
namespace: open-cluster-management
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
dbStorage:
size: 10Gi
deployments:
collector:
resources:
limits:
memory: 8Gi
requests:
cpu: 25m
memory: 64Mi
database:
envVar:
- name: POSTGRESQL_EFFECTIVE_CACHE_SIZE
value: 1024MB
- name: POSTGRESQL_SHARED_BUFFERS
value: 512MB
- name: WORK_MEM
value: 128MB
resources:
limits:
memory: 16Gi
requests:
cpu: 25m
memory: 32Mi
indexer:
resources:
limits:
memory: 4Gi
requests:
cpu: 25m
memory: 128Mi
queryapi:
replicaCount: 2
resources:
limits:
memory: 4Gi
requests:
cpu: 25m
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/infra
operator: Exists
---
apiVersion: search.open-cluster-management.io/v1alpha1
kind: Search
metadata:
name: search-v2-operator
namespace: open-cluster-management
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
dbStorage:
size: 10Gi
deployments:
collector:
resources:
limits:
memory: 8Gi
requests:
cpu: 25m
memory: 64Mi
database:
envVar:
- name: POSTGRESQL_EFFECTIVE_CACHE_SIZE
value: 1024MB
- name: POSTGRESQL_SHARED_BUFFERS
value: 512MB
- name: WORK_MEM
value: 128MB
resources:
limits:
memory: 16Gi
requests:
cpu: 25m
memory: 32Mi
indexer:
resources:
limits:
memory: 4Gi
requests:
cpu: 25m
memory: 128Mi
queryapi:
replicaCount: 2
resources:
limits:
memory: 4Gi
requests:
cpu: 25m
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/infra
operator: Exists
acmProvisioning.yaml
---
apiVersion: metal3.io/v1alpha1
kind: Provisioning
metadata:
  name: provisioning-configuration
  annotations:
    argocd.argoproj.io/sync-wave: "6"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
  watchAllNamespaces: true
  # some servers do not support virtual media installations
  # when the image is served using the https protocol
  # disableVirtualMediaTLS: true
---
apiVersion: metal3.io/v1alpha1
kind: Provisioning
metadata:
  name: provisioning-configuration
  annotations:
    argocd.argoproj.io/sync-wave: "6"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
  watchAllNamespaces: true
  # some servers do not support virtual media installations
  # when the image is served using the https protocol
  # disableVirtualMediaTLS: true
acmSubscription.yaml
---
# OLM Subscription installing the RHACM operator from the disconnected
# mirror catalog.
# NOTE(review): this document was collapsed onto a single line (invalid YAML);
# reconstructed into block style with standard 2-space indentation.
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: open-cluster-management-subscription
  namespace: open-cluster-management
spec:
  channel: release-2.13
  installPlanApproval: Automatic
  name: advanced-cluster-management
  source: redhat-operators-disconnected
  sourceNamespace: openshift-marketplace
---
# OLM Subscription installing the RHACM operator from the disconnected
# mirror catalog.
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: open-cluster-management-subscription
  namespace: open-cluster-management
spec:
  channel: release-2.13
  installPlanApproval: Automatic
  name: advanced-cluster-management
  source: redhat-operators-disconnected
  sourceNamespace: openshift-marketplace
observabilityMCO.yaml
---
apiVersion: observability.open-cluster-management.io/v1beta2
kind: MultiClusterObservability
metadata:
name: observability
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
# avoids MultiClusterHub Observability to own/manage the
# spoke clusters configuration about AlertManager forwards.
# ZTP Policies will be in charge of configuring it
# https://issues.redhat.com/browse/CNF-13398
mco-disable-alerting: "true"
spec:
# based on the data provided by acm-capacity tool
# https://github.com/stolostron/capacity-planning/blob/main/calculation/ObsSizingTemplate-Rev1.ipynb
# for a scenario with:
# 3500SNOs, 125 pods and 4 Namespaces (apart from Openshift NS)
# storage retention 15 days
# downsampling disabled
# default MCO Addon configuration samples_per_hour, pv_retention_hrs.
# More on how to estimate: https://access.redhat.com/articles/7103886
advanced:
retentionConfig:
blockDuration: 2h
deleteDelay: 48h
retentionInLocal: 24h
retentionResolutionRaw: 15d
enableDownsampling: false
observabilityAddonSpec:
enableMetrics: true
interval: 300
storageConfig:
storageClass: # your-fs-storageclass-here
alertmanagerStorageSize: 10Gi
compactStorageSize: 100Gi
metricObjectStorage:
# buckets storage should provide a capacity
# of at least 2.5TB
key: thanos.yaml
name: thanos-object-storage
receiveStorageSize: 10Gi
ruleStorageSize: 30Gi
storeStorageSize: 100Gi
# In addition to these storage settings, the `metricObjectStorage`
# points to an Object Storage. Under the reference configuration,
# scale and retention the estimated object storage is about 101Gi
---
apiVersion: observability.open-cluster-management.io/v1beta2
kind: MultiClusterObservability
metadata:
name: observability
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
# avoids MultiClusterHub Observability to own/manage the
# spoke clusters configuration about AlertManager forwards.
# ZTP Policies will be in charge of configuring it
# https://issues.redhat.com/browse/CNF-13398
mco-disable-alerting: "true"
spec:
# based on the data provided by acm-capacity tool
# https://github.com/stolostron/capacity-planning/blob/main/calculation/ObsSizingTemplate-Rev1.ipynb
# for a scenario with:
# 3500SNOs, 125 pods and 4 Namespaces (apart from Openshift NS)
# storage retention 15 days
# downsampling disabled
# default MCO Addon configuration samples_per_hour, pv_retention_hrs.
# More on how to estimate: https://access.redhat.com/articles/7103886
advanced:
retentionConfig:
blockDuration: 2h
deleteDelay: 48h
retentionInLocal: 24h
retentionResolutionRaw: 15d
enableDownsampling: false
observabilityAddonSpec:
enableMetrics: true
interval: 300
storageConfig:
storageClass: # your-fs-storageclass-here
alertmanagerStorageSize: 10Gi
compactStorageSize: 100Gi
metricObjectStorage:
# buckets storage should provide a capacity
# of at least 2.5TB
key: thanos.yaml
name: thanos-object-storage
receiveStorageSize: 10Gi
ruleStorageSize: 30Gi
storeStorageSize: 100Gi
# In addition to these storage settings, the `metricObjectStorage`
# points to an Object Storage. Under the reference configuration,
# scale and retention the estimated object storage is about 101Gi
observabilityNS.yaml
---
# Namespace for the RHACM multicluster observability stack.
# The cluster-monitoring label enables OpenShift platform monitoring for it.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    openshift.io/cluster-monitoring: "true"
  name: open-cluster-management-observability
---
apiVersion: v1
kind: Namespace
metadata:
  labels:
    openshift.io/cluster-monitoring: "true"
  name: open-cluster-management-observability
observabilityOBC.yaml
---
# ObjectBucketClaim providing the S3-compatible bucket used by the
# observability stack (Thanos object storage), backed by NooBaa.
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: observability-obc
  annotations:
    argocd.argoproj.io/sync-wave: "8"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
  namespace: open-cluster-management-observability
spec:
  generateBucketName: observability-object-bucket
  storageClassName: openshift-storage.noobaa.io
---
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
  name: observability-obc
  annotations:
    argocd.argoproj.io/sync-wave: "8"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
  namespace: open-cluster-management-observability
spec:
  generateBucketName: observability-object-bucket
  storageClassName: openshift-storage.noobaa.io
observabilitySecret.yaml
---
# Pull secret placeholder for the observability namespace; the value is
# filled in by the user or by the pull-secret-copy policy.
apiVersion: v1
kind: Secret
metadata:
  annotations:
    argocd.argoproj.io/sync-wave: "9"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
  labels:
    cluster.open-cluster-management.io/backup: ""
  name: multiclusterhub-operator-pull-secret
  namespace: open-cluster-management-observability
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: '' # Value provided by user or by pull-secret-openshift-config-copy policy
---
apiVersion: v1
kind: Secret
metadata:
  annotations:
    argocd.argoproj.io/sync-wave: "9"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
  labels:
    cluster.open-cluster-management.io/backup: ""
  name: multiclusterhub-operator-pull-secret
  namespace: open-cluster-management-observability
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: '' # Value provided by user or by pull-secret-openshift-config-copy policy
pull-secret-copy.yaml
---
# this policy will create a copy of the pull secret from openshift-config to open-cluster-management-observability namespace
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
  name: pull-secret-copy
  namespace: open-cluster-management-observability
  annotations:
    argocd.argoproj.io/sync-wave: "9"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    policy.open-cluster-management.io/description: Policy used to copy the pull secret from openshift-config to open-cluster-management-observability namespace
spec:
  remediationAction: enforce
  disabled: false
  policy-templates:
    - objectDefinition:
        apiVersion: policy.open-cluster-management.io/v1
        kind: ConfigurationPolicy
        metadata:
          name: pull-secret-openshift-config-copy
        spec:
          object-templates:
            - complianceType: musthave
              objectDefinition:
                apiVersion: v1
                data:
                  .dockerconfigjson: '{{- if eq (lookup "v1" "Secret" "open-cluster-management" "multiclusterhub-operator-pull-secret").kind "Secret" -}} {{- fromSecret "open-cluster-management" "multiclusterhub-operator-pull-secret" ".dockerconfigjson" -}} {{- else -}} {{- fromSecret "openshift-config" "pull-secret" ".dockerconfigjson" -}} {{- end -}}'
                kind: Secret
                metadata:
                  labels:
                    # fixed typo: label key was "ccluster.open-cluster-management.io/backup";
                    # the backup component only recognizes the "cluster." prefix
                    cluster.open-cluster-management.io/backup: ""
                  name: multiclusterhub-operator-pull-secret
                  namespace: open-cluster-management-observability
                type: kubernetes.io/dockerconfigjson
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: pull-secret-copy
namespace: open-cluster-management-observability
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: pull-secret-copy
namespace: open-cluster-management-observability
placementRef:
name: pull-secret-copy
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: pull-secret-copy
apiGroup: policy.open-cluster-management.io
kind: Policy
---
apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: global
namespace: open-cluster-management-observability
spec:
clusterSet: global
---
# this policy will create a copy of the pull secret from openshift-config to open-cluster-management-observability namespace
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
  name: pull-secret-copy
  namespace: open-cluster-management-observability
  annotations:
    argocd.argoproj.io/sync-wave: "9"
    argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
    policy.open-cluster-management.io/description: Policy used to copy the pull secret from openshift-config to open-cluster-management-observability namespace
spec:
  remediationAction: enforce
  disabled: false
  policy-templates:
    - objectDefinition:
        apiVersion: policy.open-cluster-management.io/v1
        kind: ConfigurationPolicy
        metadata:
          name: pull-secret-openshift-config-copy
        spec:
          object-templates:
            - complianceType: musthave
              objectDefinition:
                apiVersion: v1
                data:
                  .dockerconfigjson: '{{- if eq (lookup "v1" "Secret" "open-cluster-management" "multiclusterhub-operator-pull-secret").kind "Secret" -}} {{- fromSecret "open-cluster-management" "multiclusterhub-operator-pull-secret" ".dockerconfigjson" -}} {{- else -}} {{- fromSecret "openshift-config" "pull-secret" ".dockerconfigjson" -}} {{- end -}}'
                kind: Secret
                metadata:
                  labels:
                    # fixed typo: label key was "ccluster.open-cluster-management.io/backup";
                    # the backup component only recognizes the "cluster." prefix
                    cluster.open-cluster-management.io/backup: ""
                  name: multiclusterhub-operator-pull-secret
                  namespace: open-cluster-management-observability
                type: kubernetes.io/dockerconfigjson
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: pull-secret-copy
namespace: open-cluster-management-observability
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: pull-secret-copy
namespace: open-cluster-management-observability
placementRef:
name: pull-secret-copy
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: pull-secret-copy
apiGroup: policy.open-cluster-management.io
kind: Policy
---
apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: global
namespace: open-cluster-management-observability
spec:
clusterSet: global
thanosSecret.yaml
# This content creates a policy which copies the necessary data from
# the generated Object Bucket Claim into the necessary secret for
# observability to connect to thanos.
---
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
annotations:
policy.open-cluster-management.io/categories: CM Configuration Management
policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
policy.open-cluster-management.io/description: ""
policy.open-cluster-management.io/standards: NIST SP 800-53
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: obs-thanos-secret
namespace: hub-policies
spec:
disabled: false
policy-templates:
- objectDefinition:
apiVersion: policy.open-cluster-management.io/v1
kind: ConfigurationPolicy
metadata:
name: thanos-secret-cp
spec:
remediationAction: enforce
severity: high
object-templates-raw: |
{{- /* read the bucket data and noobaa endpoint access data */ -}}
{{- $objBucket := (lookup "v1" "ConfigMap" "open-cluster-management-observability" "observability-obc") }}
{{- $awsAccess := (lookup "v1" "Secret" "open-cluster-management-observability" "observability-obc") }}
{{- /* create the thanos config file as a template */ -}}
{{- $thanosConfig := `
type: s3
config:
bucket: %[1]s
endpoint: %[2]s
insecure: true
access_key: %[3]s
secret_key: %[4]s
`
}}
{{- /* create the secret using the thanos configuration template created above. */ -}}
- complianceType: mustonlyhave
objectDefinition:
apiVersion: v1
kind: Secret
metadata:
name: thanos-object-storage
namespace: open-cluster-management-observability
type: Opaque
data:
thanos.yaml: {{ (printf $thanosConfig $objBucket.data.BUCKET_NAME
$objBucket.data.BUCKET_HOST
($awsAccess.data.AWS_ACCESS_KEY_ID | base64dec)
($awsAccess.data.AWS_SECRET_ACCESS_KEY | base64dec)
) | base64enc }}
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
name: obs-thanos-pl
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
name: obs-thanos-binding
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
placementRef:
name: obs-thanos-pl
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: obs-thanos-secret
apiGroup: policy.open-cluster-management.io
kind: Policy
# For reference this is the secret which is being generated (with
# appropriate values in the fields):
# ---
# apiVersion: v1
# kind: Secret
# metadata:
# name: thanos-object-storage
# namespace: open-cluster-management-observability
# type: Opaque
# stringData:
# thanos.yaml: |
# type: s3
# config:
# bucket: "<BUCKET_NAME>"
# endpoint: "<BUCKET_HOST>"
# insecure: true
# access_key: "<AWS_ACCESS_KEY_ID>"
# secret_key: "<AWS_SECRET_ACCESS_KEY>"
# This content creates a policy which copies the necessary data from
# the generated Object Bucket Claim into the necessary secret for
# observability to connect to thanos.
---
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
annotations:
policy.open-cluster-management.io/categories: CM Configuration Management
policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
policy.open-cluster-management.io/description: ""
policy.open-cluster-management.io/standards: NIST SP 800-53
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: obs-thanos-secret
namespace: hub-policies
spec:
disabled: false
policy-templates:
- objectDefinition:
apiVersion: policy.open-cluster-management.io/v1
kind: ConfigurationPolicy
metadata:
name: thanos-secret-cp
spec:
remediationAction: enforce
severity: high
object-templates-raw: |
{{- /* read the bucket data and noobaa endpoint access data */ -}}
{{- $objBucket := (lookup "v1" "ConfigMap" "open-cluster-management-observability" "observability-obc") }}
{{- $awsAccess := (lookup "v1" "Secret" "open-cluster-management-observability" "observability-obc") }}
{{- /* create the thanos config file as a template */ -}}
{{- $thanosConfig := `
type: s3
config:
bucket: %[1]s
endpoint: %[2]s
insecure: true
access_key: %[3]s
secret_key: %[4]s
`
}}
{{- /* create the secret using the thanos configuration template created above. */ -}}
- complianceType: mustonlyhave
objectDefinition:
apiVersion: v1
kind: Secret
metadata:
name: thanos-object-storage
namespace: open-cluster-management-observability
type: Opaque
data:
thanos.yaml: {{ (printf $thanosConfig $objBucket.data.BUCKET_NAME
$objBucket.data.BUCKET_HOST
($awsAccess.data.AWS_ACCESS_KEY_ID | base64dec)
($awsAccess.data.AWS_SECRET_ACCESS_KEY | base64dec)
) | base64enc }}
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
name: obs-thanos-pl
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
name: obs-thanos-binding
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
placementRef:
name: obs-thanos-pl
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: obs-thanos-secret
apiGroup: policy.open-cluster-management.io
kind: Policy
# For reference this is the secret which is being generated (with
# appropriate values in the fields):
# ---
# apiVersion: v1
# kind: Secret
# metadata:
# name: thanos-object-storage
# namespace: open-cluster-management-observability
# type: Opaque
# stringData:
# thanos.yaml: |
# type: s3
# config:
# bucket: "<BUCKET_NAME>"
# endpoint: "<BUCKET_HOST>"
# insecure: true
# access_key: "<AWS_ACCESS_KEY_ID>"
# secret_key: "<AWS_SECRET_ACCESS_KEY>"
talmSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: openshift-topology-aware-lifecycle-manager-subscription
namespace: openshift-operators
spec:
channel: stable
installPlanApproval: Automatic
name: topology-aware-lifecycle-manager
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: openshift-topology-aware-lifecycle-manager-subscription
namespace: openshift-operators
spec:
channel: stable
installPlanApproval: Automatic
name: topology-aware-lifecycle-manager
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
5.17.2. ストレージのリファレンス YAML リンクのコピーリンクがクリップボードにコピーされました!
lsoLocalVolume.yaml
---
apiVersion: "local.storage.openshift.io/v1"
kind: "LocalVolume"
metadata:
name: "local-disks"
namespace: "openshift-local-storage"
annotations:
argocd.argoproj.io/sync-wave: "-3"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
nodeSelector:
nodeSelectorTerms:
- matchExpressions:
- key: cluster.ocs.openshift.io/openshift-storage
operator: In
values:
- ""
storageClassDevices:
- storageClassName: "local-sc"
forceWipeDevicesAndDestroyAllData: true
volumeMode: Block
devicePaths:
- /dev/disk/by-path/pci-xxx
---
apiVersion: "local.storage.openshift.io/v1"
kind: "LocalVolume"
metadata:
name: "local-disks"
namespace: "openshift-local-storage"
annotations:
argocd.argoproj.io/sync-wave: "-3"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
nodeSelector:
nodeSelectorTerms:
- matchExpressions:
- key: cluster.ocs.openshift.io/openshift-storage
operator: In
values:
- ""
storageClassDevices:
- storageClassName: "local-sc"
forceWipeDevicesAndDestroyAllData: true
volumeMode: Block
devicePaths:
- /dev/disk/by-path/pci-xxx
lsoNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-local-storage
annotations:
argocd.argoproj.io/sync-wave: "-5"
labels:
openshift.io/cluster-monitoring: "true"
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-local-storage
annotations:
argocd.argoproj.io/sync-wave: "-5"
labels:
openshift.io/cluster-monitoring: "true"
lsoOperatorGroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: local-operator-group
namespace: openshift-local-storage
spec:
targetNamespaces:
- openshift-local-storage
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: local-operator-group
namespace: openshift-local-storage
spec:
targetNamespaces:
- openshift-local-storage
lsoSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: local-storage-operator
namespace: openshift-local-storage
spec:
channel: stable
installPlanApproval: Automatic
name: local-storage-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: local-storage-operator
namespace: openshift-local-storage
spec:
channel: stable
installPlanApproval: Automatic
name: local-storage-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
odfNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-storage
annotations:
argocd.argoproj.io/sync-wave: "-5"
workload.openshift.io/allowed: management
labels:
openshift.io/cluster-monitoring: "true"
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-storage
annotations:
argocd.argoproj.io/sync-wave: "-5"
workload.openshift.io/allowed: management
labels:
openshift.io/cluster-monitoring: "true"
odfOperatorGroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: openshift-storage-operatorgroup
namespace: openshift-storage
spec:
targetNamespaces:
- openshift-storage
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: openshift-storage-operatorgroup
namespace: openshift-storage
spec:
targetNamespaces:
- openshift-storage
odfReady.yaml
---
# this policy will ensure ODF is ready
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
name: odf-ready-validation
namespace: hub-policies
annotations:
# we do a validation of ODF after StorageCluster creation
# but after MCH on ACM is created.
# MCH components like Observability and AgentServiceConfig
# come later and need ODF to be ready
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
policy.open-cluster-management.io/description: Policy used to ensure ODF installation is ready
spec:
remediationAction: inform # we don't want to modify/create, only validate the status
disabled: false
policy-templates:
- objectDefinition:
apiVersion: policy.open-cluster-management.io/v1
kind: ConfigurationPolicy
metadata:
name: odf-ready
spec:
object-templates:
- complianceType: musthave
objectDefinition:
apiVersion: batch/v1
kind: Job
metadata:
namespace: openshift-storage
status:
ready: 0
succeeded: 1
conditions:
- status: "True"
type: Complete
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: odf-ready-pl
namespace: hub-policies
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: odf-ready-binding
namespace: hub-policies
placementRef:
name: odf-ready-pl
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: odf-ready-validation
apiGroup: policy.open-cluster-management.io
kind: Policy
---
# this policy will ensure ODF is ready
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
name: odf-ready-validation
namespace: hub-policies
annotations:
# we do a validation of ODF after StorageCluster creation
# but after MCH on ACM is created.
# MCH components like Observability and AgentServiceConfig
# come later and need ODF to be ready
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
policy.open-cluster-management.io/description: Policy used to ensure ODF installation is ready
spec:
remediationAction: inform # we don't want to modify/create, only validate the status
disabled: false
policy-templates:
- objectDefinition:
apiVersion: policy.open-cluster-management.io/v1
kind: ConfigurationPolicy
metadata:
name: odf-ready
spec:
object-templates:
- complianceType: musthave
objectDefinition:
apiVersion: batch/v1
kind: Job
metadata:
namespace: openshift-storage
status:
ready: 0
succeeded: 1
conditions:
- status: "True"
type: Complete
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: odf-ready-pl
namespace: hub-policies
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: odf-ready-binding
namespace: hub-policies
placementRef:
name: odf-ready-pl
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: odf-ready-validation
apiGroup: policy.open-cluster-management.io
kind: Policy
odfSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: odf-operator
namespace: openshift-storage
spec:
channel: "stable-4.18"
name: odf-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: odf-operator
namespace: openshift-storage
spec:
channel: "stable-4.18"
name: odf-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
storageCluster.yaml
---
apiVersion: ocs.openshift.io/v1
kind: StorageCluster
metadata:
name: ocs-storagecluster
namespace: openshift-storage
annotations:
argocd.argoproj.io/sync-wave: "-2"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
manageNodes: false
resources:
mds:
limits:
cpu: "3"
memory: "8Gi"
requests:
cpu: "3"
memory: "8Gi"
monDataDirHostPath: /var/lib/rook
storageDeviceSets:
- count: 1 # <-- Modify count to desired value. For each set of 3 disks increment the count by 1.
dataPVCTemplate:
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "600Gi" # <-- This should be changed as per storage size. Minimum 100 GiB and Maximum 4 TiB
storageClassName: "local-sc" # match this with the storage block created at the LSO step
volumeMode: Block
name: ocs-deviceset
placement: {}
portable: false
replica: 3
resources:
limits:
cpu: "2"
memory: "5Gi"
requests:
cpu: "2"
memory: "5Gi"
---
apiVersion: ocs.openshift.io/v1
kind: StorageCluster
metadata:
name: ocs-storagecluster
namespace: openshift-storage
annotations:
argocd.argoproj.io/sync-wave: "-2"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
manageNodes: false
resources:
mds:
limits:
cpu: "3"
memory: "8Gi"
requests:
cpu: "3"
memory: "8Gi"
monDataDirHostPath: /var/lib/rook
storageDeviceSets:
- count: 1 # <-- Modify count to desired value. For each set of 3 disks increment the count by 1.
dataPVCTemplate:
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "600Gi" # <-- This should be changed as per storage size. Minimum 100 GiB and Maximum 4 TiB
storageClassName: "local-sc" # match this with the storage block created at the LSO step
volumeMode: Block
name: ocs-deviceset
placement: {}
portable: false
replica: 3
resources:
limits:
cpu: "2"
memory: "5Gi"
requests:
cpu: "2"
memory: "5Gi"
5.17.3. GitOps Operator および GitOps ZTP のリファレンス YAML リンクのコピーリンクがクリップボードにコピーされました!
addPluginsPolicy.yaml
# This content creates a policy which installs the necessary argocd
# plugins.
---
apiVersion: v1
kind: Namespace
metadata:
name: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
---
apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
name: default
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
clusterSet: default
---
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
annotations:
policy.open-cluster-management.io/categories: CM Configuration Management
policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
policy.open-cluster-management.io/description: ""
policy.open-cluster-management.io/standards: NIST SP 800-53
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: ztp-argocd-plugins-installer
namespace: hub-policies
spec:
disabled: false
policy-templates:
- objectDefinition:
apiVersion: policy.open-cluster-management.io/v1
kind: ConfigurationPolicy
metadata:
name: ztp-argocd-plugins-cp
spec:
remediationAction: enforce
severity: high
object-templates:
- complianceType: musthave
objectDefinition:
apiVersion: argoproj.io/v1beta1
kind: ArgoCD
metadata:
name: openshift-gitops
namespace: openshift-gitops
spec:
controller:
resources:
limits:
cpu: "16"
memory: 32Gi
requests:
cpu: "1"
memory: 2Gi
kustomizeBuildOptions: "--enable-alpha-plugins"
repo:
resources:
limits:
cpu: "8"
memory: "16Gi"
requests:
cpu: "1"
memory: "2Gi"
volumes:
- name: "kustomize"
emptyDir: {}
initContainers:
- name: "kustomize-plugin"
command:
- "/exportkustomize.sh"
args:
- "/.config"
imagePullPolicy: "Always"
volumeMounts:
- name: "kustomize"
mountPath: "/.config"
terminationMessagePolicy: "File"
terminationMessagePath: "/dev/termination-log"
image: "registry.redhat.io/openshift4/ztp-site-generate-rhel8:v4.19"
- name: "policy-generator-install"
image: "registry.redhat.io/rhacm2/multicluster-operators-subscription-rhel9:v2.13"
imagePullPolicy: "Always"
volumeMounts:
- name: "kustomize"
mountPath: "/.config"
args:
- "-c"
- "mkdir -p /.config/kustomize/plugin/policy.open-cluster-management.io/v1/policygenerator && cp /policy-generator/PolicyGenerator-not-fips-compliant /.config/kustomize/plugin/policy.open-cluster-management.io/v1/policygenerator/PolicyGenerator"
command:
- "/bin/bash"
env:
- name: "ARGOCD_EXEC_TIMEOUT"
value: "360s"
- name: "KUSTOMIZE_PLUGIN_HOME"
value: "/.config/kustomize/plugin"
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
name: ztp-argo-plugins-pl
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
name: ztp-argo-plugins-binding
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
placementRef:
name: ztp-argo-plugins-pl
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: ztp-argocd-plugins-installer
apiGroup: policy.open-cluster-management.io
kind: Policy
# This content creates a policy which installs the necessary argocd
# plugins.
---
apiVersion: v1
kind: Namespace
metadata:
name: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
---
apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
name: default
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
clusterSet: default
---
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
annotations:
policy.open-cluster-management.io/categories: CM Configuration Management
policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
policy.open-cluster-management.io/description: ""
policy.open-cluster-management.io/standards: NIST SP 800-53
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: ztp-argocd-plugins-installer
namespace: hub-policies
spec:
disabled: false
policy-templates:
- objectDefinition:
apiVersion: policy.open-cluster-management.io/v1
kind: ConfigurationPolicy
metadata:
name: ztp-argocd-plugins-cp
spec:
remediationAction: enforce
severity: high
object-templates:
- complianceType: musthave
objectDefinition:
apiVersion: argoproj.io/v1beta1
kind: ArgoCD
metadata:
name: openshift-gitops
namespace: openshift-gitops
spec:
controller:
resources:
limits:
cpu: "16"
memory: 32Gi
requests:
cpu: "1"
memory: 2Gi
kustomizeBuildOptions: "--enable-alpha-plugins"
repo:
resources:
limits:
cpu: "8"
memory: "16Gi"
requests:
cpu: "1"
memory: "2Gi"
volumes:
- name: "kustomize"
emptyDir: {}
initContainers:
- name: "kustomize-plugin"
command:
- "/exportkustomize.sh"
args:
- "/.config"
imagePullPolicy: "Always"
volumeMounts:
- name: "kustomize"
mountPath: "/.config"
terminationMessagePolicy: "File"
terminationMessagePath: "/dev/termination-log"
image: "registry.redhat.io/openshift4/ztp-site-generate-rhel8:v4.19"
- name: "policy-generator-install"
image: "registry.redhat.io/rhacm2/multicluster-operators-subscription-rhel9:v2.13"
imagePullPolicy: "Always"
volumeMounts:
- name: "kustomize"
mountPath: "/.config"
args:
- "-c"
- "mkdir -p /.config/kustomize/plugin/policy.open-cluster-management.io/v1/policygenerator && cp /policy-generator/PolicyGenerator-not-fips-compliant /.config/kustomize/plugin/policy.open-cluster-management.io/v1/policygenerator/PolicyGenerator"
command:
- "/bin/bash"
env:
- name: "ARGOCD_EXEC_TIMEOUT"
value: "360s"
- name: "KUSTOMIZE_PLUGIN_HOME"
value: "/.config/kustomize/plugin"
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
name: ztp-argo-plugins-pl
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
name: ztp-argo-plugins-binding
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
placementRef:
name: ztp-argo-plugins-pl
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: ztp-argocd-plugins-installer
apiGroup: policy.open-cluster-management.io
kind: Policy
app-project.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: infra
namespace: openshift-gitops
spec:
destinations:
- namespace: '*'
server: '*'
sourceRepos:
- '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'
- group: ""
kind: Namespace
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: infra
namespace: openshift-gitops
spec:
destinations:
- namespace: '*'
server: '*'
sourceRepos:
- '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'
- group: ""
kind: Namespace
argocd-application.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: hub-config
namespace: openshift-gitops
spec:
destination:
server: "https://kubernetes.default.svc"
namespace: default
project: infra
syncPolicy:
automated:
allowEmpty: true
selfHeal: true
prune: true
source:
path: "telco-hub/configuration"
repoURL: "https://github.com/openshift-kni/telco-reference.git"
targetRevision: "main"
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: hub-config
namespace: openshift-gitops
spec:
destination:
server: "https://kubernetes.default.svc"
namespace: default
project: infra
syncPolicy:
automated:
allowEmpty: true
selfHeal: true
prune: true
source:
path: "telco-hub/configuration"
repoURL: "https://github.com/openshift-kni/telco-reference.git"
targetRevision: "main"
argocd-tls-certs-cm.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-tls-certs-cm
namespace: openshift-gitops
labels:
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd
data: {}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-tls-certs-cm
namespace: openshift-gitops
labels:
app.kubernetes.io/name: argocd-cm
app.kubernetes.io/part-of: argocd
data: {}
argocd-ssh-known-hosts-cm.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-ssh-known-hosts-cm
namespace: openshift-gitops
data:
ssh_known_hosts: |
#############################################################
# by default empty known hosts, because of usual #
# disconnected environments. #
# #
# Manually add needed ssh known hosts: #
# example: $> ssh-keyscan my-github.com #
# Copy the output here
#############################################################
# my-github.com ssh-rsa AAAAB3NzaC1y...J4i36KV/aCl4Ixz
# my-github.com ecdsa-sha2-nistp256...GGtLKqmwLLeKhe6xgc=
# my-github.com ssh-ed25519 AAAAC3N...lNrvWjBQ2u
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-ssh-known-hosts-cm
namespace: openshift-gitops
data:
ssh_known_hosts: |
#############################################################
# by default empty known hosts, because of usual #
# disconnected environments. #
# #
# Manually add needed ssh known hosts: #
# example: $> ssh-keyscan my-github.com #
# Copy the output here
#############################################################
# my-github.com ssh-rsa AAAAB3NzaC1y...J4i36KV/aCl4Ixz
# my-github.com ecdsa-sha2-nistp256...GGtLKqmwLLeKhe6xgc=
# my-github.com ssh-ed25519 AAAAC3N...lNrvWjBQ2u
clusterrole.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hub-rds-argocd
labels:
rbac.authorization.k8s.io/aggregate-to-ocm-cluster-manager-admin: "true"
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- '*'
- apiGroups:
- local.storage.openshift.io
resources:
- localvolumes
verbs:
- '*'
- apiGroups:
- ocs.openshift.io
resources:
- storageclusters
verbs:
- '*'
- apiGroups:
- objectbucket.io
resources:
- objectbucketclaims
verbs:
- '*'
- apiGroups:
- operator.open-cluster-management.io
resources:
- multiclusterhubs
verbs:
- '*'
- apiGroups:
- metal3.io
resources:
- provisionings
verbs:
- '*'
- apiGroups:
- agent-install.openshift.io
resources:
- agentserviceconfigs
verbs:
- '*'
- apiGroups:
- search.open-cluster-management.io
resources:
- searches
verbs:
- '*'
- apiGroups:
- observability.open-cluster-management.io
resources:
- multiclusterobservabilities
verbs:
- '*'
- apiGroups:
- policy.open-cluster-management.io
resources:
- policies
- placementbindings
verbs:
- '*'
- apiGroups:
- cluster.open-cluster-management.io
resources:
- managedclustersetbindings
- managedclustersets/bind
- managedclustersets/join
- placements
verbs:
- '*'
- apiGroups:
- multicluster.openshift.io
resources:
- multiclusterengines
verbs:
- 'patch'
- 'get'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: hub-rds-argocd
labels:
rbac.authorization.k8s.io/aggregate-to-ocm-cluster-manager-admin: "true"
rules:
- apiGroups:
- ""
resources:
- secrets
verbs:
- '*'
- apiGroups:
- local.storage.openshift.io
resources:
- localvolumes
verbs:
- '*'
- apiGroups:
- ocs.openshift.io
resources:
- storageclusters
verbs:
- '*'
- apiGroups:
- objectbucket.io
resources:
- objectbucketclaims
verbs:
- '*'
- apiGroups:
- operator.open-cluster-management.io
resources:
- multiclusterhubs
verbs:
- '*'
- apiGroups:
- metal3.io
resources:
- provisionings
verbs:
- '*'
- apiGroups:
- agent-install.openshift.io
resources:
- agentserviceconfigs
verbs:
- '*'
- apiGroups:
- search.open-cluster-management.io
resources:
- searches
verbs:
- '*'
- apiGroups:
- observability.open-cluster-management.io
resources:
- multiclusterobservabilities
verbs:
- '*'
- apiGroups:
- policy.open-cluster-management.io
resources:
- policies
- placementbindings
verbs:
- '*'
- apiGroups:
- cluster.open-cluster-management.io
resources:
- managedclustersetbindings
- managedclustersets/bind
- managedclustersets/join
- placements
verbs:
- '*'
- apiGroups:
- multicluster.openshift.io
resources:
- multiclusterengines
verbs:
- 'patch'
- 'get'
clusterrolebinding.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-hub-rds-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hub-rds-argocd
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-hub-rds-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: hub-rds-argocd
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
gitopsNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-gitops-operator
labels:
openshift.io/cluster-monitoring: "true"
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-gitops-operator
labels:
openshift.io/cluster-monitoring: "true"
gitopsOperatorGroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: openshift-gitops-operator
namespace: openshift-gitops-operator
spec:
upgradeStrategy: Default
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: openshift-gitops-operator
namespace: openshift-gitops-operator
spec:
upgradeStrategy: Default
gitopsSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: openshift-gitops-operator
namespace: openshift-gitops-operator
spec:
channel: gitops-1.16
installPlanApproval: Automatic
name: openshift-gitops-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: openshift-gitops-operator
namespace: openshift-gitops-operator
spec:
channel: gitops-1.16
installPlanApproval: Automatic
name: openshift-gitops-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
ztp-repo.yaml
---
apiVersion: v1
kind: Secret
metadata:
name: ztp-repo
namespace: openshift-gitops
labels:
argocd.argoproj.io/secret-type: repository
stringData:
# use following for ssh repo access
url: git@gitlab.example.com:namespace/repo.git
insecure: "false"
sshPrivateKey: |
-----BEGIN OPENSSH PRIVATE KEY-----
INSERT PRIVATE KEY
-----END OPENSSH PRIVATE KEY-----
# uncomment and use following for https repo access
# url: https://gitlab.example.com/namespace/repo
# insecure: "false"
# password: password
# username: username
# forceHttpBasicAuth: "true"
# more examples: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-repositories-yaml/
---
apiVersion: v1
kind: Secret
metadata:
name: ztp-repo
namespace: openshift-gitops
labels:
argocd.argoproj.io/secret-type: repository
stringData:
# use following for ssh repo access
url: git@gitlab.example.com:namespace/repo.git
insecure: "false"
sshPrivateKey: |
-----BEGIN OPENSSH PRIVATE KEY-----
INSERT PRIVATE KEY
-----END OPENSSH PRIVATE KEY-----
# uncomment and use following for https repo access
# url: https://gitlab.example.com/namespace/repo
# insecure: "false"
# password: password
# username: username
# forceHttpBasicAuth: "true"
# more examples: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-repositories-yaml/
app-project.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: infra
namespace: openshift-gitops
spec:
destinations:
- namespace: '*'
server: '*'
sourceRepos:
- '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'
- group: ""
kind: Namespace
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: infra
namespace: openshift-gitops
spec:
destinations:
- namespace: '*'
server: '*'
sourceRepos:
- '*'
clusterResourceWhitelist:
- group: '*'
kind: '*'
- group: ""
kind: Namespace
clusters-app.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: clusters-sub
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: clusters
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
destination:
server: https://kubernetes.default.svc
namespace: clusters-sub
project: ztp-app-project
source:
path: ztp/gitops-subscriptions/argocd/example/siteconfig
repoURL: https://github.com/openshift-kni/cnf-features-deploy
targetRevision: master
# uncomment the below plugin if you will be adding the plugin binaries in the same repo->dir where
# the siteconfig.yaml exists AND use the ../../hack/patch-argocd-dev.sh script to re-patch the deployment-repo-server
# plugin:
# name: kustomize-with-local-plugins
ignoreDifferences: # recommended way to allow ACM controller to manage its fields. alternative approach documented below (1)
- group: cluster.open-cluster-management.io
kind: ManagedCluster
managedFieldsManagers:
- controller
# (1) alternatively you can choose to ignore a specific path like so (replace managedFieldsManagers with jsonPointers)
# jsonPointers:
# - /metadata/labels/cloud
# - /metadata/labels/vendor
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- PrunePropagationPolicy=background
- RespectIgnoreDifferences=true
---
apiVersion: v1
kind: Namespace
metadata:
name: clusters-sub
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: clusters
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
destination:
server: https://kubernetes.default.svc
namespace: clusters-sub
project: ztp-app-project
source:
path: ztp/gitops-subscriptions/argocd/example/siteconfig
repoURL: https://github.com/openshift-kni/cnf-features-deploy
targetRevision: master
# uncomment the below plugin if you will be adding the plugin binaries in the same repo->dir where
# the siteconfig.yaml exists AND use the ../../hack/patch-argocd-dev.sh script to re-patch the deployment-repo-server
# plugin:
# name: kustomize-with-local-plugins
ignoreDifferences: # recommended way to allow the ACM controller to manage its fields; alternative approach documented below (1)
- group: cluster.open-cluster-management.io
kind: ManagedCluster
managedFieldsManagers:
- controller
# (1) alternatively you can choose to ignore a specific path like so (replace managedFieldsManagers with jsonPointers)
# jsonPointers:
# - /metadata/labels/cloud
# - /metadata/labels/vendor
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- PrunePropagationPolicy=background
- RespectIgnoreDifferences=true
gitops-cluster-rolebinding.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-cluster
annotations:
argocd.argoproj.io/sync-wave: "100"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-cluster
annotations:
argocd.argoproj.io/sync-wave: "100"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
gitops-policy-rolebinding.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-policy
annotations:
argocd.argoproj.io/sync-wave: "100"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: open-cluster-management:cluster-manager-admin
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-policy
annotations:
argocd.argoproj.io/sync-wave: "100"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: open-cluster-management:cluster-manager-admin
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
kustomization.yaml
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- policies-app-project.yaml
- gitops-policy-rolebinding.yaml
- gitops-cluster-rolebinding.yaml
- clusters-app.yaml
- policies-app.yaml
metadata:
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- policies-app-project.yaml
- gitops-policy-rolebinding.yaml
- gitops-cluster-rolebinding.yaml
- clusters-app.yaml
- policies-app.yaml
metadata:
annotations:
argocd.argoproj.io/sync-wave: "100"
policies-app-project.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: policy-app-project
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
clusterResourceWhitelist:
- group: ''
kind: Namespace
- group: 'hive.openshift.io'
kind: ClusterImageSet
destinations:
- namespace: 'ztp*'
server: '*'
- namespace: 'policies-sub'
server: '*'
namespaceResourceWhitelist:
- group: ''
kind: ConfigMap
- group: ''
kind: Namespace
- group: 'apps.open-cluster-management.io'
kind: PlacementRule
- group: 'policy.open-cluster-management.io'
kind: Policy
- group: 'policy.open-cluster-management.io'
kind: PlacementBinding
- group: 'ran.openshift.io'
kind: PolicyGenTemplate
- group: cluster.open-cluster-management.io
kind: Placement
- group: policy.open-cluster-management.io
kind: PolicyGenerator
- group: policy.open-cluster-management.io
kind: PolicySet
- group: cluster.open-cluster-management.io
kind: ManagedClusterSetBinding
sourceRepos:
- '*'
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: policy-app-project
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
clusterResourceWhitelist:
- group: ''
kind: Namespace
- group: 'hive.openshift.io'
kind: ClusterImageSet
destinations:
- namespace: 'ztp*'
server: '*'
- namespace: 'policies-sub'
server: '*'
namespaceResourceWhitelist:
- group: ''
kind: ConfigMap
- group: ''
kind: Namespace
- group: 'apps.open-cluster-management.io'
kind: PlacementRule
- group: 'policy.open-cluster-management.io'
kind: Policy
- group: 'policy.open-cluster-management.io'
kind: PlacementBinding
- group: 'ran.openshift.io'
kind: PolicyGenTemplate
- group: cluster.open-cluster-management.io
kind: Placement
- group: policy.open-cluster-management.io
kind: PolicyGenerator
- group: policy.open-cluster-management.io
kind: PolicySet
- group: cluster.open-cluster-management.io
kind: ManagedClusterSetBinding
sourceRepos:
- '*'
policies-app.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: policies-sub
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: policies
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
destination:
server: https://kubernetes.default.svc
namespace: policies-sub
project: policy-app-project
source:
path: ztp/gitops-subscriptions/argocd/example/policygentemplates
repoURL: https://github.com/openshift-kni/cnf-features-deploy
targetRevision: master
# uncomment the below plugin if you will be adding the plugin binaries in the same repo->dir where
# the policyGenTemplate.yaml exist AND use the ../../hack/patch-argocd-dev.sh script to re-patch the deployment-repo-server
# plugin:
# name: kustomize-with-local-plugins
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
---
apiVersion: v1
kind: Namespace
metadata:
name: policies-sub
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: policies
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
destination:
server: https://kubernetes.default.svc
namespace: policies-sub
project: policy-app-project
source:
path: ztp/gitops-subscriptions/argocd/example/policygentemplates
repoURL: https://github.com/openshift-kni/cnf-features-deploy
targetRevision: master
# uncomment the below plugin if you will be adding the plugin binaries in the same repo->dir where
# the policyGenTemplate.yaml exist AND use the ../../hack/patch-argocd-dev.sh script to re-patch the deployment-repo-server
# plugin:
# name: kustomize-with-local-plugins
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
5.17.4. レジストリーのリファレンス YAML
catalog-source.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: redhat-operators-disconnected
namespace: openshift-marketplace
spec:
displayName: Disconnected Red Hat Operators
image: <registry.example.com:8443>/openshift-marketplace/redhat-operators-disconnected:v4.19
publisher: Red Hat
sourceType: grpc
---
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: redhat-operators-disconnected
namespace: openshift-marketplace
spec:
displayName: Disconnected Red Hat Operators
image: <registry.example.com:8443>/openshift-marketplace/redhat-operators-disconnected:v4.19
publisher: Red Hat
sourceType: grpc
idms-operator.yaml
---
# This ImageDigestMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image digest mirrors for operator images required by telco workloads
apiVersion: config.openshift.io/v1
kind: ImageDigestMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: idms-operator-0
spec:
imageDigestMirrors:
- mirrors:
- <registry.example.com:8443>/oadp
source: registry.redhat.io/oadp
- mirrors:
- <registry.example.com:8443>/multicluster-engine
source: registry.redhat.io/multicluster-engine
- mirrors:
- <registry.example.com:8443>/rhel8
source: registry.redhat.io/rhel8
- mirrors:
- <registry.example.com:8443>/odf4
source: registry.redhat.io/odf4
- mirrors:
- <registry.example.com:8443>/rhel9
source: registry.redhat.io/rhel9
- mirrors:
- <registry.example.com:8443>/rhceph
source: registry.redhat.io/rhceph
- mirrors:
- <registry.example.com:8443>/openshift-gitops-1
source: registry.redhat.io/openshift-gitops-1
- mirrors:
- <registry.example.com:8443>/rh-sso-7
source: registry.redhat.io/rh-sso-7
- mirrors:
- <registry.example.com:8443>/rhacm2
source: registry.redhat.io/rhacm2
- mirrors:
- <registry.example.com:8443>/openshift4
source: registry.redhat.io/openshift4
---
# This ImageDigestMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image digest mirrors for operator images required by telco workloads
apiVersion: config.openshift.io/v1
kind: ImageDigestMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: idms-operator-0
spec:
imageDigestMirrors:
- mirrors:
- <registry.example.com:8443>/oadp
source: registry.redhat.io/oadp
- mirrors:
- <registry.example.com:8443>/multicluster-engine
source: registry.redhat.io/multicluster-engine
- mirrors:
- <registry.example.com:8443>/rhel8
source: registry.redhat.io/rhel8
- mirrors:
- <registry.example.com:8443>/odf4
source: registry.redhat.io/odf4
- mirrors:
- <registry.example.com:8443>/rhel9
source: registry.redhat.io/rhel9
- mirrors:
- <registry.example.com:8443>/rhceph
source: registry.redhat.io/rhceph
- mirrors:
- <registry.example.com:8443>/openshift-gitops-1
source: registry.redhat.io/openshift-gitops-1
- mirrors:
- <registry.example.com:8443>/rh-sso-7
source: registry.redhat.io/rh-sso-7
- mirrors:
- <registry.example.com:8443>/rhacm2
source: registry.redhat.io/rhacm2
- mirrors:
- <registry.example.com:8443>/openshift4
source: registry.redhat.io/openshift4
idms-release.yaml
---
# This ImageDigestMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image digest mirrors for OpenShift release images
apiVersion: config.openshift.io/v1
kind: ImageDigestMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: idms-release-0
spec:
imageDigestMirrors:
- mirrors:
- <registry.example.com:8443>/openshift-release-dev
source: quay.io/openshift-release-dev
---
# This ImageDigestMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image digest mirrors for OpenShift release images
apiVersion: config.openshift.io/v1
kind: ImageDigestMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: idms-release-0
spec:
imageDigestMirrors:
- mirrors:
- <registry.example.com:8443>/openshift-release-dev
source: quay.io/openshift-release-dev
image-config.yaml
---
apiVersion: config.openshift.io/v1
kind: Image
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-50"
name: cluster
spec:
additionalTrustedCA:
name: registry-ca
---
apiVersion: config.openshift.io/v1
kind: Image
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-50"
name: cluster
spec:
additionalTrustedCA:
name: registry-ca
itms-generic.yaml
---
# This ImageTagMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image tag mirrors for generic base images (UBI, RHEL)
apiVersion: config.openshift.io/v1
kind: ImageTagMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: itms-generic-0
spec:
imageTagMirrors:
- mirrors:
- <registry.example.com:8443>/ubi8
source: registry.redhat.io/ubi8
- mirrors:
- <registry.example.com:8443>/openshift4
source: registry.redhat.io/openshift4
- mirrors:
- <registry.example.com:8443>/rhel8
source: registry.redhat.io/rhel8
---
# This ImageTagMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image tag mirrors for generic base images (UBI, RHEL)
apiVersion: config.openshift.io/v1
kind: ImageTagMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: itms-generic-0
spec:
imageTagMirrors:
- mirrors:
- <registry.example.com:8443>/ubi8
source: registry.redhat.io/ubi8
- mirrors:
- <registry.example.com:8443>/openshift4
source: registry.redhat.io/openshift4
- mirrors:
- <registry.example.com:8443>/rhel8
source: registry.redhat.io/rhel8
itms-release.yaml
---
# This ImageTagMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image tag mirrors for OpenShift release images
apiVersion: config.openshift.io/v1
kind: ImageTagMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: itms-release-0
spec:
imageTagMirrors:
- mirrors:
- <registry.example.com:8443>/openshift-release-dev
source: quay.io/openshift-release-dev
---
# This ImageTagMirrorSet was automatically generated by oc mirror.
# Example command used:
# oc mirror -c imageset-config-4-19.yaml --workspace file://oc-mirror-workspace-4-19 docker://registry.example.com:8443 --v2
# It contains image tag mirrors for OpenShift release images
apiVersion: config.openshift.io/v1
kind: ImageTagMirrorSet
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
name: itms-release-0
spec:
imageTagMirrors:
- mirrors:
- <registry.example.com:8443>/openshift-release-dev
source: quay.io/openshift-release-dev
kustomization.yaml
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- policies-app-project.yaml
- gitops-policy-rolebinding.yaml
- gitops-cluster-rolebinding.yaml
- clusters-app.yaml
- policies-app.yaml
metadata:
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- policies-app-project.yaml
- gitops-policy-rolebinding.yaml
- gitops-cluster-rolebinding.yaml
- clusters-app.yaml
- policies-app.yaml
metadata:
annotations:
argocd.argoproj.io/sync-wave: "100"
operator-hub.yaml
---
apiVersion: config.openshift.io/v1
kind: OperatorHub
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
name: cluster
spec:
disableAllDefaultSources: true
---
apiVersion: config.openshift.io/v1
kind: OperatorHub
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-10"
name: cluster
spec:
disableAllDefaultSources: true
registry-ca.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-40"
name: registry-ca
namespace: openshift-config
data:
# important: keep the format "url..port"
<registry.example.com..8443>: |
-----BEGIN CERTIFICATE-----
MIIGcjCCBFqgAwIBAgIFICIE...
-----END CERTIFICATE-----
---
apiVersion: v1
kind: ConfigMap
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-40"
name: registry-ca
namespace: openshift-config
data:
# important: keep the format "url..port"
<registry.example.com..8443>: |
-----BEGIN CERTIFICATE-----
MIIGcjCCBFqgAwIBAgIFICIE...
-----END CERTIFICATE-----
5.17.5. ログのリファレンス YAML
clusterLogForwarder.yaml
---
# ClusterLogForwarder for Telco Hub
# Forwards audit and infrastructure logs to Kafka with hub-specific labeling
apiVersion: observability.openshift.io/v1
kind: ClusterLogForwarder
metadata:
name: instance
namespace: openshift-logging
annotations:
# Deploy after cluster logging operator is ready
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
# Ignore controller-managed status differences in ArgoCD
argocd.argoproj.io/compare-options: IgnoreExtraneous
spec:
# Filters add metadata labels to log records for identification
filters:
- name: telco-hub-labels
type: openshiftLabels
# Add these labels to all forwarded log records
openshiftLabels:
cluster-role: hub # Identifies this as hub cluster logs
environment: production # Environment designation
telco-component: management # Component categorization
# Output destinations for log forwarding
outputs:
- name: hub-kafka-output
type: kafka
kafka:
# Kafka broker endpoint -> update for your environment!
url: tcp://$kafka-server:9092/endpoint
# Pipelines define which logs go where with what processing
pipelines:
- name: telco-hub-logs
# Log types to forward (excludes application logs for hub)
inputRefs:
- audit # OpenShift API audit logs
- infrastructure # Container runtime and system logs
# Where to send the logs
outputRefs:
- hub-kafka-output
# Apply labeling filter to identify log source
filterRefs:
- telco-hub-labels
# Service account for log collection
serviceAccount:
name: collector
---
# ClusterLogForwarder for Telco Hub
# Forwards audit and infrastructure logs to Kafka with hub-specific labeling
apiVersion: observability.openshift.io/v1
kind: ClusterLogForwarder
metadata:
name: instance
namespace: openshift-logging
annotations:
# Deploy after cluster logging operator is ready
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
# Ignore controller-managed status differences in ArgoCD
argocd.argoproj.io/compare-options: IgnoreExtraneous
spec:
# Filters add metadata labels to log records for identification
filters:
- name: telco-hub-labels
type: openshiftLabels
# Add these labels to all forwarded log records
openshiftLabels:
cluster-role: hub # Identifies this as hub cluster logs
environment: production # Environment designation
telco-component: management # Component categorization
# Output destinations for log forwarding
outputs:
- name: hub-kafka-output
type: kafka
kafka:
# Kafka broker endpoint -> update for your environment!
url: tcp://$kafka-server:9092/endpoint
# Pipelines define which logs go where with what processing
pipelines:
- name: telco-hub-logs
# Log types to forward (excludes application logs for hub)
inputRefs:
- audit # OpenShift API audit logs
- infrastructure # Container runtime and system logs
# Where to send the logs
outputRefs:
- hub-kafka-output
# Apply labeling filter to identify log source
filterRefs:
- telco-hub-labels
# Service account for log collection
serviceAccount:
name: collector
clusterLogNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-logging
annotations:
workload.openshift.io/allowed: management
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-logging
annotations:
workload.openshift.io/allowed: management
clusterLogOperGroup.yaml
---
# OperatorGroup scoping the cluster-logging operator to the openshift-logging namespace.
# (Was collapsed onto a single line, which is not valid YAML; reformatted to block style.
# The intact duplicate of this document below is the reference for its content.)
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: cluster-logging
  namespace: openshift-logging
spec:
  targetNamespaces:
    - openshift-logging
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: cluster-logging
namespace: openshift-logging
spec:
targetNamespaces:
- openshift-logging
clusterLogServiceAccount.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: collector
namespace: openshift-logging
annotations:
argocd.argoproj.io/sync-wave: "2"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: collector
namespace: openshift-logging
annotations:
argocd.argoproj.io/sync-wave: "2"
clusterLogServiceAccountAuditBinding.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logcollector-audit-logs-binding
annotations:
argocd.argoproj.io/sync-wave: "2"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: collect-audit-logs
subjects:
- kind: ServiceAccount
name: collector
namespace: openshift-logging
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logcollector-audit-logs-binding
annotations:
argocd.argoproj.io/sync-wave: "2"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: collect-audit-logs
subjects:
- kind: ServiceAccount
name: collector
namespace: openshift-logging
clusterLogServiceAccountInfrastructureBinding.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logcollector-infrastructure-logs-binding
annotations:
argocd.argoproj.io/sync-wave: "2"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: collect-infrastructure-logs
subjects:
- kind: ServiceAccount
name: collector
namespace: openshift-logging
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: logcollector-infrastructure-logs-binding
annotations:
argocd.argoproj.io/sync-wave: "2"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: collect-infrastructure-logs
subjects:
- kind: ServiceAccount
name: collector
namespace: openshift-logging
clusterLogSubscription.yaml
---
# OLM Subscription installing the cluster-logging operator from the disconnected catalog.
# (Was collapsed onto a single line, which is not valid YAML; reformatted to block style.
# The intact duplicate of this document below is the reference for its content.)
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: cluster-logging
  namespace: openshift-logging
spec:
  channel: "stable-6.2"
  name: cluster-logging
  source: redhat-operators-disconnected
  sourceNamespace: openshift-marketplace
  installPlanApproval: Automatic
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: cluster-logging
namespace: openshift-logging
spec:
channel: "stable-6.2"
name: cluster-logging
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
5.17.6. インストールのリファレンス YAML
agent-config.yaml
---
apiVersion: v1beta1
kind: AgentConfig
metadata:
name: hub # need to match the same name put in install-config
rendezvousIP: 192.168.125.20 # one of the master IP
# Replace the fields below with your network details
hosts:
- hostname: hub-ctl-0
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:01
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:01
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::20
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
- hostname: hub-ctl-1
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:02
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:02
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::21
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
- hostname: hub-ctl-2
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:03
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:03
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::22
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
---
apiVersion: v1beta1
kind: AgentConfig
metadata:
name: hub # need to match the same name put in install-config
rendezvousIP: 192.168.125.20 # one of the master IP
# Replace the fields below with your network details
hosts:
- hostname: hub-ctl-0
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:01
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:01
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::20
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
- hostname: hub-ctl-1
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:02
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:02
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::21
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
- hostname: hub-ctl-2
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:03
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:03
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::22
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
install-config.yaml
---
apiVersion: v1
metadata:
name: hub # replace with your hub name
baseDomain: example.com # replace with your domain name
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
replicas: 3
networking:
clusterNetwork:
- cidr: 10.128.0.0/14
hostPrefix: 23
- cidr: fd02::/48
hostPrefix: 64
machineNetwork:
- cidr: 192.168.125.0/24 # replace with your machine network CIDR
- cidr: fd01::/64
networkType: OVNKubernetes
serviceNetwork:
- 172.30.0.0/16
- fd03::/112
# Replace the fields below with your network details
platform:
baremetal:
provisioningNetwork: "Disabled"
apiVIPs:
- 192.168.125.10
- fd01::10
ingressVIPs:
- 192.168.125.11
- fd01::11
# Replace <registry.example.com:8443> with the mirror registry's address.
imageDigestSources:
- mirrors:
- <registry.example.com:8443>/openshift-release-dev/ocp-release
source: quay.io/openshift-release-dev/ocp-release
- mirrors:
- <registry.example.com:8443>/openshift-release-dev/ocp-v4.0-art-dev
source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
# Add the mirror registry SSL certificate chain up to the CA itself.
additionalTrustBundle: |
-----BEGIN CERTIFICATE-----
MIID7jCCAtagAwXXX...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDvTCCAqWgAwIBAgIUcXQpXXX...
-----END CERTIFICATE-----
# Add the mirror registry credentials to the pull secret.
pullSecret: '{"auths":{"<registry.example.com:8443>":{"auth": "aW5pdDo0R1XXXXXjdCbUoweUNuMWI1OTZBMmhkcEhjMw==","email": "user@redhat.com"},...}}}'
# Add the SSH public key to connect to the OCP nodes
sshKey: |
ssh-rsa AAAAB3NzaC1yc2EA...
---
apiVersion: v1
metadata:
name: hub # replace with your hub name
baseDomain: example.com # replace with your domain name
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
replicas: 3
networking:
clusterNetwork:
- cidr: 10.128.0.0/14
hostPrefix: 23
- cidr: fd02::/48
hostPrefix: 64
machineNetwork:
- cidr: 192.168.125.0/24 # replace with your machine network CIDR
- cidr: fd01::/64
networkType: OVNKubernetes
serviceNetwork:
- 172.30.0.0/16
- fd03::/112
# Replace the fields below with your network details
platform:
baremetal:
provisioningNetwork: "Disabled"
apiVIPs:
- 192.168.125.10
- fd01::10
ingressVIPs:
- 192.168.125.11
- fd01::11
# Replace <registry.example.com:8443> with the mirror registry's address.
imageDigestSources:
- mirrors:
- <registry.example.com:8443>/openshift-release-dev/ocp-release
source: quay.io/openshift-release-dev/ocp-release
- mirrors:
- <registry.example.com:8443>/openshift-release-dev/ocp-v4.0-art-dev
source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
# Add the mirror registry SSL certificate chain up to the CA itself.
additionalTrustBundle: |
-----BEGIN CERTIFICATE-----
MIID7jCCAtagAwXXX...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDvTCCAqWgAwIBAgIUcXQpXXX...
-----END CERTIFICATE-----
# Add the mirror registry credentials to the pull secret.
pullSecret: '{"auths":{"<registry.example.com:8443>":{"auth": "aW5pdDo0R1XXXXXjdCbUoweUNuMWI1OTZBMmhkcEhjMw==","email": "user@redhat.com"},...}}}'
# Add the SSH public key to connect to the OCP nodes
sshKey: |
ssh-rsa AAAAB3NzaC1yc2EA...