IPs that correspond to the Gluster Network
STEP-1: Generate Keys, Certificates & CA files
The following section generates the keys, certificates, creates the
CA file and distributes it to all the hosts
As the certificates are already generated, it's enough to stop the
rest of the volumes, set TLS/SSL related volume options, and
start the volumes
STEP-2: Stop all the volumes
STEP-3: Set volume options on all the volumes to enable TLS/SSL on the volumes
STEP-4: Start all the volumes
# gdeploy config: enable TLS/SSL on existing Gluster volumes.
# Replace each <Gluster_Network_NodeX> placeholder with the IP address that
# corresponds to the Gluster (storage) network on that host.
[hosts]
<Gluster_Network_NodeA>
<Gluster_Network_NodeB>
<Gluster_Network_NodeC>
# STEP-1: Generate keys, certificates & CA files.
# The following section generates the keys and certificates, creates the
# CA file, and distributes it to all the hosts while enabling SSL on the
# "engine" volume.
[volume1]
action=enable-ssl
volname=engine
ssl_clients=<Gluster_Network_NodeA>,<Gluster_Network_NodeB>,<Gluster_Network_NodeC>
ignore_volume_errors=no
# As the certificates were already generated by STEP-1 above, it's enough
# to stop the rest of the volumes, set the TLS/SSL-related volume options,
# and start the volumes again.
# STEP-2: Stop all the remaining volumes (a volume must be stopped before
# its SSL options can be changed).
[volume2]
action=stop
volname=vmstore
[volume3]
action=stop
volname=data
# STEP-3: Set volume options on the remaining volumes to enable TLS/SSL.
# The comma-separated keys pair up with the comma-separated values:
#   client.ssl=on, server.ssl=on,
#   auth.ssl-allow="<NodeA>;<NodeB>;<NodeC>"
[volume4]
action=set
volname=vmstore
key=client.ssl,server.ssl,auth.ssl-allow
value=on,on,"<Gluster_Network_NodeA>;<Gluster_Network_NodeB>;<Gluster_Network_NodeC>"
ignore_volume_errors=no
[volume5]
action=set
volname=data
key=client.ssl,server.ssl,auth.ssl-allow
value=on,on,"<Gluster_Network_NodeA>;<Gluster_Network_NodeB>;<Gluster_Network_NodeC>"
ignore_volume_errors=no
# STEP-4: Start the volumes that were stopped in STEP-2, now with TLS/SSL
# options in effect.
[volume6]
action=start
volname=vmstore
[volume7]
action=start
volname=data
Copy to ClipboardCopied!Toggle word wrapToggle overflow
EDITME: @1: Change to IP addresses of the network intended for gluster traffic
Values provided here are used to probe the gluster hosts.
EDITME: @3: Specify the number of data disks in RAID configuration
EDITME: @4: Provide the subscription details
Register to RHSM only on the node which needs to be replaced
Disable multipath
# gdeploy config: prepare a replacement node for an existing cluster.
# EDITME: @1: Change to the IP address of the network intended for gluster
# traffic. Values provided here are used to probe the gluster hosts.
[hosts]
10.70.X1.Y1
# EDITME: @2: Change the -h argument to the gluster-network IP address of
# the node which is going to be replaced; -d is the data disk device.
[script1]
action=execute
ignore_script_errors=no
file=/usr/share/ansible/gdeploy/scripts/grafton-sanity-check.sh -d sdc -h 10.70.X1.Y1
# EDITME: @3: Specify the number of data disks in the RAID configuration.
# RAID level of the underlying storage.
[disktype]
raid6
# Count of data disks only (parity disks excluded for RAID 6) — confirm
# against the actual hardware layout.
[diskcount]
4
# RAID stripe size — presumably in KB (gdeploy convention); verify against
# the controller configuration.
[stripesize]
256
# EDITME: @4: Provide the subscription details.
# Register to RHSM only on the node which needs to be replaced — the
# ":10.70.X1.Y1" suffix restricts this section to that host.
[RH-subscription1:10.70.X1.Y1]
action=register
username=<username>
password=<passwd>
pool=<pool-id>
# repos= left empty — presumably disables all repos before re-enabling the
# required ones below; verify against gdeploy's disable-repos semantics.
[RH-subscription2]
action=disable-repos
repos=
# Enable only the repos needed for RHGS + RHV management agent on RHEL 7.
[RH-subscription3]
action=enable-repos
repos=rhel-7-server-rpms,rh-gluster-3-for-rhel-7-server-rpms,rhel-7-server-rhv-4-mgmt-agent-rpms
# Install (and update) the packages required for a hyperconverged
# Gluster/oVirt host.
[yum1]
action=install
packages=glusterfs-server,vdsm-gluster,ovirt-hosted-engine-setup,gdeploy,cockpit-ovirt-dashboard
update=yes
# Enable time synchronization at boot …
[service1]
action=enable
service=chronyd
# … and (re)start it now.
[service2]
action=restart
service=chronyd
# Sanity check: list the peers in the trusted storage pool.
[shell1]
action=execute
command=gluster pool list
# Configure vdsm non-interactively.
[shell2]
action=execute
command=vdsm-tool configure --force
# Disable multipath on the replacement node.
[script3]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-multipath.sh
# EDIT ME: @5: UNCOMMENT SECTIONS ONLY if the original brick disks have to
# be replaced (recreates the PV, VG, thinpool and brick LVs on the new disk).
# NOTE: the host suffix in each [lvN:...] header must be the gluster-network
# IP exactly as listed in [hosts] — dotted 10.70.X1.Y1, not 10.70.X1:Y1.
#[pv1]
#action=create
#devices=sdc
#ignore_pv_errors=no
#[vg1]
#action=create
#vgname=gluster_vg_sdc
#pvname=sdc
#ignore_vg_errors=no
#[lv2:10.70.X1.Y1]
#action=create
#poolname=gluster_thinpool_sdc
#ignore_lv_errors=no
#vgname=gluster_vg_sdc
#lvtype=thinpool
#poolmetadatasize=16GB
#size=14TB
#[lv3:10.70.X1.Y1]
#action=create
#lvname=gluster_lv_engine
#ignore_lv_errors=no
#vgname=gluster_vg_sdc
#mount=/gluster_bricks/engine
#size=100GB
#lvtype=thick
#[lv5:10.70.X1.Y1]
#action=create
#lvname=gluster_lv_data
#ignore_lv_errors=no
#vgname=gluster_vg_sdc
#mount=/gluster_bricks/data
#lvtype=thinlv
#poolname=gluster_thinpool_sdc
#virtualsize=12TB
#[lv7:10.70.X1.Y1]
#action=create
#lvname=gluster_lv_vmstore
#ignore_lv_errors=no
#vgname=gluster_vg_sdc
#mount=/gluster_bricks/vmstore
#lvtype=thinlv
#poolname=gluster_thinpool_sdc
#virtualsize=1TB
#[selinux]
#yes
#[lv9:10.70.X1.Y1]
#action=setup-cache
#ssd=sdb
#vgname=gluster_vg_sdc
#poolname=lvthinpool
#cache_lv=lvcache
#cache_lvsize=180GB
# Start glusterd and set up the glusterfs systemd slice.
[service3]
action=start
service=glusterd
slice_setup=yes
# Open the ports and firewalld services required for gluster and oVirt.
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
# Disable the gluster hook scripts.
[script2]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-gluster-hooks.sh
Copy to ClipboardCopied!Toggle word wrapToggle overflow
Add the hosts to be added
If using RHEL 7 as platform, enable required repos
RHVH has all the packages available
If using RHEL 7 as platform, have the following section to install packages
enable chronyd
start chronyd service
Setup glusterfs slice
Open the required ports and firewalld services
Disable gluster hook scripts
# gdeploy config: prepare new hosts to be added to the cluster.
# List the gluster-network addresses of the hosts to be added.
[hosts]
<Gluster_Network_NodeD>
<Gluster_Network_NodeE>
<Gluster_Network_NodeF>
# If using RHEL 7 as the platform, register and enable the required repos.
# RHVH already has all the packages available — skip this section there.
# disable-repos=yes first disables existing repos so only "repos" remain.
[RH-subscription]
ignore_register_errors=no
ignore_attach_pool_errors=no
ignore_enable_errors=no
action=register
username=<username>
password=<mypassword>
pool=<pool-id>
repos=rhel-7-server-rpms,rh-gluster-3-for-rhel-7-server-rpms,rhel-7-server-rhv-4-mgmt-agent-rpms
disable-repos=yes
# If using RHEL 7 as the platform, install (and update) the packages
# required for a hyperconverged Gluster/oVirt host.
[yum1]
action=install
packages=glusterfs-server,vdsm-gluster,ovirt-hosted-engine-setup,gdeploy,cockpit-ovirt-dashboard
update=yes
gpgcheck=yes
ignore_yum_errors=no
# Enable time synchronization at boot.
[service1]
action=enable
service=chronyd
# (Re)start the chronyd service now.
[service2]
action=restart
service=chronyd
# Restart glusterd and set up the glusterfs systemd slice.
[service3]
action=restart
service=glusterd
slice_setup=yes
# Open the required ports and firewalld services for gluster and oVirt.
[firewalld]
action=add
ports=111/tcp,2049/tcp,54321/tcp,5900/tcp,5900-6923/tcp,5666/tcp,16514/tcp,54322/tcp
services=glusterfs
# Disable the gluster hook scripts.
[script2]
action=execute
file=/usr/share/ansible/gdeploy/scripts/disable-gluster-hooks.sh
Copy to ClipboardCopied!Toggle word wrapToggle overflow
Ayudamos a los usuarios de Red Hat a innovar y alcanzar sus objetivos con nuestros productos y servicios con contenido en el que pueden confiar. Explore nuestras recientes actualizaciones.
Hacer que el código abierto sea más inclusivo
Red Hat se compromete a reemplazar el lenguaje problemático en nuestro código, documentación y propiedades web. Para más detalles, consulte el Blog de Red Hat.
Acerca de Red Hat
Ofrecemos soluciones reforzadas que facilitan a las empresas trabajar en plataformas y entornos, desde el centro de datos central hasta el perímetro de la red.