5.17. Hub cluster reference configuration CRs
The following is the complete YAML reference for all the custom resources (CRs) in the telco management hub reference configuration for 4.19.
5.17.1. RHACM reference YAML
acmAgentServiceConfig.yaml
---
apiVersion: agent-install.openshift.io/v1beta1
kind: AgentServiceConfig
metadata:
name: agent
annotations:
argocd.argoproj.io/sync-wave: "7"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
databaseStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
filesystemStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
imageStorage:
storageClassName: # your-fs-storageclass-here
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Gi
mirrorRegistryRef:
name: mirror-registry-config
osImages:
# Replace <http-server-address:port> with the address of the local web server that stores the RHCOS images.
# The images can be downloaded from "https://mirror.openshift.com/pub/openshift-v4/x86_64/dependencies/rhcos/".
- cpuArchitecture: "x86_64"
openshiftVersion: "4.17"
rootFSUrl: http://<http-server-address:port>/rhcos-4.17.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.17.0-x86_64-live.x86_64.iso
version: "417.94.202409121747-0"
- cpuArchitecture: "x86_64"
openshiftVersion: "4.18"
rootFSUrl: http://<http-server-address:port>/rhcos-4.18.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.18.0-x86_64-live.x86_64.iso
version: "418.94.202502100215-0"
- cpuArchitecture: "x86_64"
openshiftVersion: "4.19"
rootFSUrl: http://<http-server-address:port>/rhcos-4.19.0-x86_64-live-rootfs.x86_64.img
url: http://<http-server-address:port>/rhcos-4.19.0-x86_64-live-iso.x86_64.iso
version: "9.6.20250530-0"
acmMCH.yaml
---
apiVersion: operator.open-cluster-management.io/v1
kind: MultiClusterHub
metadata:
annotations:
argocd.argoproj.io/sync-wave: "4"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
installer.open-cluster-management.io/mce-subscription-spec: '{"source": "redhat-operators-disconnected", "installPlanApproval": "Automatic"}'
installer.open-cluster-management.io/oadp-subscription-spec: '{"source": "redhat-operators-disconnected", "installPlanApproval": "Automatic"}'
name: multiclusterhub
namespace: open-cluster-management
spec:
availabilityConfig: High
enableClusterBackup: false
ingress: {}
overrides:
components:
- configOverrides: {}
enabled: true
name: app-lifecycle
- configOverrides: {}
enabled: true
name: cluster-lifecycle
- configOverrides: {}
enabled: true
name: cluster-permission
- configOverrides: {}
enabled: true
name: console
- configOverrides: {}
enabled: true
name: grc
- configOverrides: {}
enabled: true
name: insights
- configOverrides: {}
enabled: true
name: multicluster-engine
- configOverrides: {}
enabled: true
name: multicluster-observability
- configOverrides: {}
enabled: true
name: search
- configOverrides: {}
enabled: true
name: submariner-addon
- configOverrides: {}
enabled: true
name: volsync
- configOverrides: {}
enabled: true
name: cluster-backup
- configOverrides: {}
enabled: true
name: siteconfig
- configOverrides: {}
enabled: false
name: edge-manager-preview
separateCertificateManagement: false
---
apiVersion: cluster.open-cluster-management.io/v1beta2
kind: ManagedClusterSetBinding
metadata:
annotations:
argocd.argoproj.io/sync-wave: "4"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: global
namespace: openshift-storage
spec:
clusterSet: global
acmMirrorRegistryCM.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: mirror-registry-config
annotations:
argocd.argoproj.io/sync-wave: "5"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
namespace: multicluster-engine
labels:
app: assisted-service
data:
# Add the mirror registry SSL certificate chain up to the CA itself.
ca-bundle.crt: |
-----BEGIN CERTIFICATE-----
MIID7jCCAtagAwXXX...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDvTCCAqWgAwXXX...
-----END CERTIFICATE-----
# The registries.conf field has been populated using the registries.conf file found in "/etc/containers/registries.conf" on each node.
# Replace <registry.example.com:8443> with the mirror registry's address.
registries.conf: |
unqualified-search-registries = ["registry.access.redhat.com", "docker.io"]
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev/ocp-release"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev/ocp-release"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift-release-dev/ocp-v4.0-art-dev"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/multicluster-engine"
[[registry.mirror]]
location = "<registry.example.com:8443>/multicluster-engine"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/odf4"
[[registry.mirror]]
location = "<registry.example.com:8443>/odf4"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/openshift4"
[[registry.mirror]]
location = "<registry.example.com:8443>/openshift4"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhacm2"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhacm2"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhceph"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhceph"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhel8"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhel8"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/rhel9"
[[registry.mirror]]
location = "<registry.example.com:8443>/rhel9"
pull-from-mirror = "digest-only"
[[registry]]
prefix = ""
location = "registry.redhat.io/ubi8"
[[registry.mirror]]
location = "<registry.example.com:8443>/ubi8"
pull-from-mirror = "tag-only"
acmNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
labels:
openshift.io/cluster-monitoring: "true"
name: open-cluster-management
acmOperGroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: open-cluster-management-group
namespace: open-cluster-management
spec:
targetNamespaces:
- open-cluster-management
acmPerfSearch.yaml
---
apiVersion: search.open-cluster-management.io/v1alpha1
kind: Search
metadata:
name: search-v2-operator
namespace: open-cluster-management
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
dbStorage:
size: 10Gi
deployments:
collector:
resources:
limits:
memory: 8Gi
requests:
cpu: 25m
memory: 64Mi
database:
envVar:
- name: POSTGRESQL_EFFECTIVE_CACHE_SIZE
value: 1024MB
- name: POSTGRESQL_SHARED_BUFFERS
value: 512MB
- name: WORK_MEM
value: 128MB
resources:
limits:
memory: 16Gi
requests:
cpu: 25m
memory: 32Mi
indexer:
resources:
limits:
memory: 4Gi
requests:
cpu: 25m
memory: 128Mi
queryapi:
replicaCount: 2
resources:
limits:
memory: 4Gi
requests:
cpu: 25m
memory: 1Gi
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/infra
operator: Exists
acmProvisioning.yaml
---
apiVersion: metal3.io/v1alpha1
kind: Provisioning
metadata:
name: provisioning-configuration
annotations:
argocd.argoproj.io/sync-wave: "6"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
watchAllNamespaces: true
# some servers do not support virtual media installations
# when the image is served using the https protocol
# disableVirtualMediaTLS: true
acmSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: open-cluster-management-subscription
namespace: open-cluster-management
spec:
channel: release-2.13
installPlanApproval: Automatic
name: advanced-cluster-management
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
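After the Subscription syncs, one way to confirm that the Advanced Cluster Management Operator installed correctly (a verification sketch, not part of the reference CRs) is to check the Subscription and ClusterServiceVersion status in the namespace:

$ oc get subscription,csv -n open-cluster-management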
observabilityMCO.yaml
---
apiVersion: observability.open-cluster-management.io/v1beta2
kind: MultiClusterObservability
metadata:
name: observability
annotations:
argocd.argoproj.io/sync-wave: "10"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
# prevents MultiClusterHub Observability from owning/managing the
# spoke cluster configuration for AlertManager forwarding.
# ZTP policies are in charge of configuring it.
# https://issues.redhat.com/browse/CNF-13398
mco-disable-alerting: "true"
spec:
# based on the data provided by acm-capacity tool
# https://github.com/stolostron/capacity-planning/blob/main/calculation/ObsSizingTemplate-Rev1.ipynb
# for a scenario with:
# 3500 SNOs, 125 pods, and 4 namespaces (apart from the OpenShift namespaces)
# storage retention 15 days
# downsampling disabled
# default MCO Addon configuration samples_per_hour, pv_retention_hrs.
# More on how to estimate: https://access.redhat.com/articles/7103886
advanced:
retentionConfig:
blockDuration: 2h
deleteDelay: 48h
retentionInLocal: 24h
retentionResolutionRaw: 15d
enableDownsampling: false
observabilityAddonSpec:
enableMetrics: true
interval: 300
storageConfig:
storageClass: # your-fs-storageclass-here
alertmanagerStorageSize: 10Gi
compactStorageSize: 100Gi
metricObjectStorage:
# the bucket storage should provide a capacity
# of at least 2.5 TB
key: thanos.yaml
name: thanos-object-storage
receiveStorageSize: 10Gi
ruleStorageSize: 30Gi
storeStorageSize: 100Gi
# In addition to these storage settings, the `metricObjectStorage`
# field points to an object storage bucket. Under the reference configuration,
# scale, and retention, the estimated object storage usage is about 101Gi.
observabilityNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
labels:
openshift.io/cluster-monitoring: "true"
name: open-cluster-management-observability
observabilityOBC.yaml
---
apiVersion: objectbucket.io/v1alpha1
kind: ObjectBucketClaim
metadata:
name: observability-obc
annotations:
argocd.argoproj.io/sync-wave: "8"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
namespace: open-cluster-management-observability
spec:
generateBucketName: observability-object-bucket
storageClassName: openshift-storage.noobaa.io
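When the claim is Bound, NooBaa creates a ConfigMap and a Secret named observability-obc in the same namespace; the thanosSecret.yaml policy later in this section reads both of them. A quick verification sketch:

$ oc get objectbucketclaim observability-obc -n open-cluster-management-observability
$ oc get configmap,secret observability-obc -n open-cluster-management-observability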
observabilitySecret.yaml
---
apiVersion: v1
kind: Secret
metadata:
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
labels:
cluster.open-cluster-management.io/backup: ""
name: multiclusterhub-operator-pull-secret
namespace: open-cluster-management-observability
type: kubernetes.io/dockerconfigjson
data:
.dockerconfigjson: '' # Value provided by user or by pull-secret-openshift-config-copy policy
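The .dockerconfigjson value is intentionally left empty here. If you populate it manually instead of relying on the pull-secret-openshift-config-copy policy mentioned in the comment, one hedged way to obtain an already base64-encoded value is to copy it from the cluster-wide pull secret:

$ oc get secret pull-secret -n openshift-config -o jsonpath='{.data.\.dockerconfigjson}'

Paste the output into the .dockerconfigjson field.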
thanosSecret.yaml
# This content creates a policy which copies the necessary data from
# the generated Object Bucket Claim into the necessary secret for
# observability to connect to thanos.
---
apiVersion: policy.open-cluster-management.io/v1
kind: Policy
metadata:
annotations:
policy.open-cluster-management.io/categories: CM Configuration Management
policy.open-cluster-management.io/controls: CM-2 Baseline Configuration
policy.open-cluster-management.io/description: ""
policy.open-cluster-management.io/standards: NIST SP 800-53
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
name: obs-thanos-secret
namespace: hub-policies
spec:
disabled: false
policy-templates:
- objectDefinition:
apiVersion: policy.open-cluster-management.io/v1
kind: ConfigurationPolicy
metadata:
name: thanos-secret-cp
spec:
remediationAction: enforce
severity: high
object-templates-raw: |
{{- /* read the bucket data and noobaa endpoint access data */ -}}
{{- $objBucket := (lookup "v1" "ConfigMap" "open-cluster-management-observability" "observability-obc") }}
{{- $awsAccess := (lookup "v1" "Secret" "open-cluster-management-observability" "observability-obc") }}
{{- /* create the thanos config file as a template */ -}}
{{- $thanosConfig := `
type: s3
config:
bucket: %[1]s
endpoint: %[2]s
insecure: true
access_key: %[3]s
secret_key: %[4]s
`
}}
{{- /* create the secret using the thanos configuration template created above. */ -}}
- complianceType: mustonlyhave
objectDefinition:
apiVersion: v1
kind: Secret
metadata:
name: thanos-object-storage
namespace: open-cluster-management-observability
type: Opaque
data:
thanos.yaml: {{ (printf $thanosConfig $objBucket.data.BUCKET_NAME
$objBucket.data.BUCKET_HOST
($awsAccess.data.AWS_ACCESS_KEY_ID | base64dec)
($awsAccess.data.AWS_SECRET_ACCESS_KEY | base64dec)
) | base64enc }}
---
apiVersion: cluster.open-cluster-management.io/v1beta1
kind: Placement
metadata:
name: obs-thanos-pl
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
predicates:
- requiredClusterSelector:
labelSelector:
matchExpressions:
- key: name
operator: In
values:
- local-cluster
---
apiVersion: policy.open-cluster-management.io/v1
kind: PlacementBinding
metadata:
name: obs-thanos-binding
namespace: hub-policies
annotations:
argocd.argoproj.io/sync-wave: "9"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
placementRef:
name: obs-thanos-pl
apiGroup: cluster.open-cluster-management.io
kind: Placement
subjects:
- name: obs-thanos-secret
apiGroup: policy.open-cluster-management.io
kind: Policy
# For reference, this is the secret that is generated (with
# appropriate values in the fields):
# ---
# apiVersion: v1
# kind: Secret
# metadata:
# name: thanos-object-storage
# namespace: open-cluster-management-observability
# type: Opaque
# stringData:
# thanos.yaml: |
# type: s3
# config:
# bucket: "<BUCKET_NAME>"
# endpoint: "<BUCKET_HOST>"
# insecure: true
# access_key: "<AWS_ACCESS_KEY_ID>"
# secret_key: "<AWS_SECRET_ACCESS_KEY>"
talmSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: openshift-topology-aware-lifecycle-manager-subscription
namespace: openshift-operators
spec:
channel: stable
installPlanApproval: Automatic
name: topology-aware-lifecycle-manager
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
5.17.2. Storage reference YAML
lsoLocalVolume.yaml
--- apiVersion: "local.storage.openshift.io/v1" kind: "LocalVolume" metadata: name: "local-disks" namespace: "openshift-local-storage" annotations: argocd.argoproj.io/sync-wave: "-3" argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true spec: nodeSelector: nodeSelectorTerms: - matchExpressions: - key: cluster.ocs.openshift.io/openshift-storage operator: In values: - "" storageClassDevices: - storageClassName: "local-sc" forceWipeDevicesAndDestroyAllData: true volumeMode: Block devicePaths: - /dev/disk/by-path/pci-xxx
---
apiVersion: "local.storage.openshift.io/v1"
kind: "LocalVolume"
metadata:
name: "local-disks"
namespace: "openshift-local-storage"
annotations:
argocd.argoproj.io/sync-wave: "-3"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
nodeSelector:
nodeSelectorTerms:
- matchExpressions:
- key: cluster.ocs.openshift.io/openshift-storage
operator: In
values:
- ""
storageClassDevices:
- storageClassName: "local-sc"
forceWipeDevicesAndDestroyAllData: true
volumeMode: Block
devicePaths:
- /dev/disk/by-path/pci-xxx
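The devicePaths entry above is a placeholder. One way to discover stable by-path device names on the storage nodes (a sketch; <storage-node-name> is a placeholder for each node labeled for OpenShift Data Foundation):

$ oc debug node/<storage-node-name> -- chroot /host ls -l /dev/disk/by-path/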
lsoNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-local-storage
annotations:
argocd.argoproj.io/sync-wave: "-5"
labels:
openshift.io/cluster-monitoring: "true"
lsoOperatorgroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: local-operator-group
namespace: openshift-local-storage
spec:
targetNamespaces:
- openshift-local-storage
lsoSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: local-storage-operator
namespace: openshift-local-storage
spec:
channel: stable
installPlanApproval: Automatic
name: local-storage-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
odfNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-storage
annotations:
argocd.argoproj.io/sync-wave: "-5"
workload.openshift.io/allowed: management
labels:
openshift.io/cluster-monitoring: "true"
odfOperatorGroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: openshift-storage-operatorgroup
namespace: openshift-storage
spec:
targetNamespaces:
- openshift-storage
odfSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
annotations:
argocd.argoproj.io/sync-wave: "-5"
name: odf-operator
namespace: openshift-storage
spec:
channel: "stable-4.18"
name: odf-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
storageCluster.yaml
---
apiVersion: ocs.openshift.io/v1
kind: StorageCluster
metadata:
name: ocs-storagecluster
namespace: openshift-storage
annotations:
argocd.argoproj.io/sync-wave: "-2"
argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true
spec:
manageNodes: false
resources:
mds:
limits:
cpu: "3"
memory: "8Gi"
requests:
cpu: "3"
memory: "8Gi"
monDataDirHostPath: /var/lib/rook
storageDeviceSets:
- count: 1 # <-- Modify count to desired value. For each set of 3 disks increment the count by 1.
dataPVCTemplate:
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: "600Gi" # <-- This should be changed as per storage size. Minimum 100 GiB and Maximum 4 TiB
storageClassName: "local-sc" # match this with the storage block created at the LSO step
volumeMode: Block
name: ocs-deviceset
placement: {}
portable: false
replica: 3
resources:
limits:
cpu: "2"
memory: "5Gi"
requests:
cpu: "2"
memory: "5Gi"
5.17.3. GitOps Operator and GitOps ZTP reference YAML
argocd-ssh-known-hosts-cm.yaml
---
apiVersion: v1
kind: ConfigMap
metadata:
name: argocd-ssh-known-hosts-cm
namespace: openshift-gitops
data:
ssh_known_hosts: |
#############################################################
# by default empty known hosts, because of usual #
# disconnected environments. #
# #
# Manually add needed ssh known hosts: #
# example: $> ssh-keyscan my-github.com #
# Copy the output here
#############################################################
# my-github.com ssh-rsa AAAAB3NzaC1y...J4i36KV/aCl4Ixz
# my-github.com ecdsa-sha2-nistp256...GGtLKqmwLLeKhe6xgc=
# my-github.com ssh-ed25519 AAAAC3N...lNrvWjBQ2u
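As the inline comments note, the ConfigMap ships with an empty known-hosts list for disconnected environments. To populate it, scan your Git server (my-github.com is the example host from the comments above) and paste the output into the ssh_known_hosts field:

$ ssh-keyscan my-github.com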
gitopsNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-gitops-operator
labels:
openshift.io/cluster-monitoring: "true"
gitopsOperatorGroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: openshift-gitops-operator
namespace: openshift-gitops-operator
spec:
upgradeStrategy: Default
gitopsSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: openshift-gitops-operator
namespace: openshift-gitops-operator
spec:
channel: gitops-1.16
installPlanApproval: Automatic
name: openshift-gitops-operator
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
ztp-repo.yaml
---
apiVersion: v1
kind: Secret
metadata:
name: ztp-repo
namespace: openshift-gitops
labels:
argocd.argoproj.io/secret-type: repository
stringData:
# use following for ssh repo access
url: git@gitlab.example.com:namespace/repo.git
insecure: "false"
sshPrivateKey: |
-----BEGIN OPENSSH PRIVATE KEY-----
INSERT PRIVATE KEY
-----END OPENSSH PRIVATE KEY-----
# uncomment and use following for https repo access
# url: https://gitlab.example.com/namespace/repo
# insecure: "false"
# password: password
# username: username
# forceHttpBasicAuth: "true"
# more examples: https://argo-cd.readthedocs.io/en/stable/operator-manual/argocd-repositories-yaml/
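If you do not already have a deploy key for the ZTP repository, a minimal sketch for generating one (./ztp-repo-key is an assumed local path):

$ ssh-keygen -t ed25519 -N '' -f ./ztp-repo-key

Paste the contents of ztp-repo-key into the sshPrivateKey field and register ztp-repo-key.pub as a deploy key on the Git server.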
app-project.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: ztp-app-project
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
clusterResourceWhitelist:
- group: 'hive.openshift.io'
kind: ClusterImageSet
- group: 'cluster.open-cluster-management.io'
kind: ManagedCluster
- group: ''
kind: Namespace
destinations:
- namespace: '*'
server: '*'
namespaceResourceWhitelist:
- group: ''
kind: ConfigMap
- group: ''
kind: Namespace
- group: ''
kind: Secret
- group: 'agent-install.openshift.io'
kind: InfraEnv
- group: 'agent-install.openshift.io'
kind: NMStateConfig
- group: 'extensions.hive.openshift.io'
kind: AgentClusterInstall
- group: 'extensions.hive.openshift.io'
kind: ImageClusterInstall
- group: 'hive.openshift.io'
kind: ClusterDeployment
- group: 'metal3.io'
kind: BareMetalHost
- group: 'metal3.io'
kind: HostFirmwareSettings
- group: 'metal3.io'
kind: DataImage
- group: 'agent.open-cluster-management.io'
kind: KlusterletAddonConfig
- group: 'cluster.open-cluster-management.io'
kind: ManagedCluster
- group: 'ran.openshift.io'
kind: SiteConfig
- group: 'siteconfig.open-cluster-management.io'
kind: ClusterInstance
- group: 'redhatcop.redhat.io'
kind: VaultSecret
sourceRepos:
- '*'
clusters-app.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: clusters-sub
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: clusters
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
destination:
server: https://kubernetes.default.svc
namespace: clusters-sub
project: ztp-app-project
source:
path: ztp/gitops-subscriptions/argocd/example/siteconfig
repoURL: https://github.com/openshift-kni/cnf-features-deploy
targetRevision: master
# uncomment the below plugin if you will be adding the plugin binaries in the same repo->dir where
# the siteconfig.yaml exists AND use the ../../hack/patch-argocd-dev.sh script to re-patch the repo-server deployment
# plugin:
# name: kustomize-with-local-plugins
ignoreDifferences: # recommended way to allow ACM controller to manage its fields. alternative approach documented below (1)
- group: cluster.open-cluster-management.io
kind: ManagedCluster
managedFieldsManagers:
- controller
# (1) alternatively you can choose to ignore a specific path like so (replace managedFieldsManagers with jsonPointers)
# jsonPointers:
# - /metadata/labels/cloud
# - /metadata/labels/vendor
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
- PrunePropagationPolicy=background
- RespectIgnoreDifferences=true
gitops-cluster-rolebinding.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-cluster
annotations:
argocd.argoproj.io/sync-wave: "100"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
gitops-policy-rolebinding.yaml
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: gitops-policy
annotations:
argocd.argoproj.io/sync-wave: "100"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: open-cluster-management:cluster-manager-admin
subjects:
- kind: ServiceAccount
name: openshift-gitops-argocd-application-controller
namespace: openshift-gitops
kustomization.yaml
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- app-project.yaml
- policies-app-project.yaml
- gitops-policy-rolebinding.yaml
- gitops-cluster-rolebinding.yaml
- clusters-app.yaml
- policies-app.yaml
metadata:
annotations:
argocd.argoproj.io/sync-wave: "100"
policies-app-project.yaml
---
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: policy-app-project
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
clusterResourceWhitelist:
- group: ''
kind: Namespace
- group: 'hive.openshift.io'
kind: ClusterImageSet
destinations:
- namespace: 'ztp*'
server: '*'
- namespace: 'policies-sub'
server: '*'
namespaceResourceWhitelist:
- group: ''
kind: ConfigMap
- group: ''
kind: Namespace
- group: 'apps.open-cluster-management.io'
kind: PlacementRule
- group: 'policy.open-cluster-management.io'
kind: Policy
- group: 'policy.open-cluster-management.io'
kind: PlacementBinding
- group: 'ran.openshift.io'
kind: PolicyGenTemplate
- group: cluster.open-cluster-management.io
kind: Placement
- group: policy.open-cluster-management.io
kind: PolicyGenerator
- group: policy.open-cluster-management.io
kind: PolicySet
- group: cluster.open-cluster-management.io
kind: ManagedClusterSetBinding
sourceRepos:
- '*'
policies-app.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: policies-sub
annotations:
argocd.argoproj.io/sync-wave: "100"
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: policies
namespace: openshift-gitops
annotations:
argocd.argoproj.io/sync-wave: "100"
spec:
destination:
server: https://kubernetes.default.svc
namespace: policies-sub
project: policy-app-project
source:
path: ztp/gitops-subscriptions/argocd/example/policygentemplates
repoURL: https://github.com/openshift-kni/cnf-features-deploy
targetRevision: master
# uncomment the below plugin if you will be adding the plugin binaries in the same repo->dir where
# the policyGenTemplate.yaml exists AND use the ../../hack/patch-argocd-dev.sh script to re-patch the repo-server deployment
# plugin:
# name: kustomize-with-local-plugins
syncPolicy:
automated:
prune: true
selfHeal: true
syncOptions:
- CreateNamespace=true
5.17.4. Logging reference YAML
clusterLogNS.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: openshift-logging
annotations:
workload.openshift.io/allowed: management
clusterLogOperGroup.yaml
---
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
name: cluster-logging
namespace: openshift-logging
spec:
targetNamespaces:
- openshift-logging
clusterLogSubscription.yaml
---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: cluster-logging
namespace: openshift-logging
spec:
channel: "stable-6.2"
name: cluster-logging
source: redhat-operators-disconnected
sourceNamespace: openshift-marketplace
installPlanApproval: Automatic
5.17.5. Installation reference YAML
agent-config.yaml
---
apiVersion: v1beta1
kind: AgentConfig
metadata:
name: hub # must match the cluster name in install-config.yaml
rendezvousIP: 192.168.125.20 # the IP address of one of the control plane (master) nodes
# Replace the fields below with your network details
hosts:
- hostname: hub-ctl-0
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:01
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:01
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::20
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
- hostname: hub-ctl-1
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:02
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:02
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::21
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
- hostname: hub-ctl-2
role: master
interfaces:
- name: ens3
macAddress: aa:aa:aa:aa:01:03
networkConfig:
interfaces:
- name: ens3
mac-address: aa:aa:aa:aa:01:03
ipv4:
enabled: true
dhcp: true
ipv6:
enabled: true
dhcp: false
address:
- ip: fd01::22
prefix-length: 64
routes:
config:
- destination: ::/0
next-hop-address: fd01::1
next-hop-interface: ens3
table-id: 254
rootDeviceHints:
deviceName: "/dev/disk/by-path/pci-0000:00:07.0"
install-config.yaml
---
apiVersion: v1
metadata:
name: hub # replace with your hub name
baseDomain: example.com # replace with your domain name
compute:
- architecture: amd64
hyperthreading: Enabled
name: worker
replicas: 0
controlPlane:
architecture: amd64
hyperthreading: Enabled
name: master
replicas: 3
networking:
clusterNetwork:
- cidr: 10.128.0.0/14
hostPrefix: 23
- cidr: fd02::/48
hostPrefix: 64
machineNetwork:
- cidr: 192.168.125.0/24 # replace with your machine network CIDR
- cidr: fd01::/64
networkType: OVNKubernetes
serviceNetwork:
- 172.30.0.0/16
- fd03::/112
# Replace the fields below with your network details
platform:
baremetal:
provisioningNetwork: "Disabled"
apiVIPs:
- 192.168.125.10
- fd01::10
ingressVIPs:
- 192.168.125.11
- fd01::11
# Replace <registry.example.com:8443> with the mirror registry's address.
imageDigestSources:
- mirrors:
- <registry.example.com:8443>/openshift-release-dev/ocp-release
source: quay.io/openshift-release-dev/ocp-release
- mirrors:
- <registry.example.com:8443>/openshift-release-dev/ocp-v4.0-art-dev
source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
# Add the mirror registry SSL certificate chain up to the CA itself.
additionalTrustBundle: |
-----BEGIN CERTIFICATE-----
MIID7jCCAtagAwXXX...
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIDvTCCAqWgAwIBAgIUcXQpXXX...
-----END CERTIFICATE-----
# Add the mirror registry credentials to the pull secret.
pullSecret: '{"auths":{"<registry.example.com:8443>":{"auth": "aW5pdDo0R1XXXXXjdCbUoweUNuMWI1OTZBMmhkcEhjMw==","email": "user@redhat.com"},...}}}'
# Add the SSH public key to connect to the OCP nodes
sshKey: |
ssh-rsa AAAAB3NzaC1yc2EA...
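Both files are consumed by the Agent-based Installer. A minimal sketch of generating the bootable agent ISO from them, assuming a working directory named hub-cluster (an assumed name); the installer consumes the YAML files in that directory and writes the agent ISO next to them:

$ mkdir hub-cluster
$ cp install-config.yaml agent-config.yaml hub-cluster/
$ openshift-install agent create image --dir hub-cluster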