Saltar a contenido

Clúster Activo - Activo

En esta configuración haremos uso de dos servidores (vserver4 y vserver5), ambos con hardware y software similares. Raids de software, drbd 8 dual primario y control de clúster pacemaker con sistema de archivos de clúster GFS2. El SO utilizado fue Fedora 22.

Redes

Renombrar interfaces

# Write a persistent udev rule that renames a NIC (matched by its MAC
# address) to a stable, meaningful name on every boot.
#   $1 = new interface name
#   $2 = current kernel name (e.g. enp3s0)
#   $3 = (optional) rules file, defaults to the persistent-net rules file
rename_iface() {
  local new_name=$1 old_name=$2 mac
  local rules_file=${3:-/etc/udev/rules.d/70-persistent-net.rules}
  mac=$(cat "/sys/class/net/$old_name/address") || return
  printf 'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="%s", ATTR{type}=="1", KERNEL=="e*", NAME="%s"\n' \
    "$mac" "$new_name" >> "$rules_file"
}

rename_iface escola enp3s0    # school LAN
rename_iface drbd   enp1s0    # dedicated DRBD replication link
rename_iface dual0  enp2s0f0
rename_iface dual1  enp2s0f1

Configuraciones de interfaz de red

...existing network interface configurations...

Particionado y raids

  • 1 SSD para el SO
  • 1 disco SSD Intel de alta gama de 100GB para usar como caché
  • 3 discos duros de 500GB (raid 1)

Formatear discos y crear particiones

    parted -a optimal -s /dev/sda mklabel msdos
    parted -a optimal -s /dev/sdc mklabel msdos
    parted -a optimal -s /dev/sdd mklabel msdos
    parted -a optimal -s /dev/sde mklabel msdos

    ...existing lsblk output...

Ajustar parámetros de discos SSD

Reducir al mínimo el uso de swap y evitar escrituras de metadatos en cada lectura (noatime):

    echo "vm.swappiness=1" >> /etc/sysctl.d/99-sysctl.conf

Y en fstab:

    noatime,nodiratime,discard

Crear raid 1

    mdadm --create /dev/md0 --level=mirror --raid-devices=2 /dev/sdc1 /dev/sdd1 --spare-devices=1 /dev/sde1 
    cat /proc/mdstat 

Crear caché LVM (sobre raid)

Volúmenes

    pvcreate /dev/md0
    vgcreate vg_data /dev/md0
    pvcreate /dev/sdb1

Caché

    vgextend vg_data /dev/sdb1
    lvcreate -L 2G -n lv_cache_meta vg_data /dev/sdb1
    lvcreate -L 88G -n lv_cache_data vg_data /dev/sdb1
    lvcreate -l 100%FREE -n lv_data vg_data /dev/md0
    lvconvert --yes --type cache-pool --cachemode writeback --poolmetadata vg_data/lv_cache_meta vg_data/lv_cache_data
    lvconvert --type cache --cachepool vg_data/lv_cache_data vg_data/lv_data

    lsblk 
    lvdisplay 
    lvdisplay -a

Ajuste de Fedora

Cortafuegos

    systemctl stop firewalld
    systemctl disable firewalld

Selinux

    setenforce 0
    sed -i s/SELINUX=enforcing/SELINUX=permissive/ /etc/sysconfig/selinux
    sed -i s/SELINUX=enforcing/SELINUX=permissive/ /etc/selinux/config
    sestatus 

Utilidades de paquetes

    yum -y install vim git tmux
    yum -y update

Protocolo de tiempo de red

yum -y install ntp
systemctl start ntpd
systemctl status ntpd
systemctl enable ntpd
date

Ajuste del historial de bash

    # Append history/session tweaks to root's .bashrc.
    # The quoted "EOF" delimiter prevents any expansion at write time.
    cat >> .bashrc << "EOF"

    # bash_history infinite (empty value means "no limit")
    export HISTFILESIZE=
    export HISTSIZE=
    export HISTTIMEFORMAT="[%F %T] "

    # Avoid duplicates
    export HISTCONTROL=ignoredups:erasedups
    # When the shell exits, append to the history file instead of overwriting it
    shopt -s histappend

    # After each command, append to the history file and reread it
    export PROMPT_COMMAND="${PROMPT_COMMAND:+$PROMPT_COMMAND$'\n'}history -a; history -c; history -r"

    # Show history without the "#<epoch>" timestamp lines
    # (fixed: dropped the useless 'cat' and quoted the anchored pattern)
    alias history_cleaned="grep -av '^#' ~/.bash_history"

    # Auto-logout idle shells after one hour
    export TMOUT=3600

    EOF

DRBD 8

Instalar paquetes

    yum -y install drbd drbd-bash-completion drbd-utils 

Archivos de configuración

Get the samples from installed packages:

    cp -a /etc/drbd.conf /root/drbd.conf.dist.f22
    cp -a /etc/drbd.d/global_common.conf /root/drbd_global_common.conf.dist.f22

drbd.conf:

    global {
            usage-count yes;
    }

    common {
            handlers {
            }

            startup {
            }

            options {
            }

            disk {
            }

            net {
                    protocol C;

                    allow-two-primaries;
                    after-sb-0pri discard-zero-changes;
                    after-sb-1pri discard-secondary;
                    after-sb-2pri disconnect;
            }
    }

Resources: /etc/drbd.d/vdisks.res

    resource vdisks {
         device    /dev/drbd0;
         disk      /dev/vg_data/lv_data;
         meta-disk internal;
        on vserver4 {
         address   10.1.3.24:7789;
        }
        on vserver5 {
         address   10.1.3.25:7789;
        }
    }
We create the DRBD metadata:

    drbdadm create-md vdisks

    [...]
    Writing meta data...
    New drbd meta data block successfully created.
    success

We can 'dry-run' adjust to check config files:

    drbdadm -d adjust all
Then we can execute:
    drbdadm adjust all

Verify on both servers:

    [root@vserver4 ~]# cat /proc/drbd 
    version: 8.4.5 (api:1/proto:86-101)
    srcversion: 5A4F43804B37BB28FCB1F47 
     0: cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r-----
        ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:0 ua:0 ap:0 ep:1 wo:f oos:488236452

We can force primary on one server (vserver4):

    drbdadm primary --force vdisks

And we do it again on the other server (vserver5) as we want a dual primary configuration:

    drbdadm primary vdisks

    [root@vserver4 ~]# cat /proc/drbd 
    version: 8.4.5 (api:1/proto:86-101)
    srcversion: 5A4F43804B37BB28FCB1F47 
     0: cs:SyncSource ro:Primary/Primary ds:UpToDate/Inconsistent C r-----
        ns:17322104 nr:0 dw:0 dr:17323016 al:0 bm:0 lo:2 pe:2 ua:2 ap:0 ep:1 wo:f oos:470916516
        [>....................] sync'ed:  3.6% (459876/476792)M
        finish: 3:03:06 speed: 42,844 (36,616) K/sec

Clúster

Instalar paquetes de pacemaker

Agentes de cercado

    yum -y install fence-agents-apc fence-agents-apc-snmp
    fence_apc --help
    fence_apc_snmp --help

Pacemaker

    dnf -y install corosync pcs pacemaker pacemaker-doc

Pacemaker drbd resource

    dnf -y install drbd-pacemaker 

Paquetes necesarios para el sistema de archivos gfs2 (necesita control de bloqueo de clúster)

    dnf -y install gfs2-utils lvm2-cluster dlm

Iniciar y configurar clúster

    systemctl start pcsd
    systemctl enable pcsd
    passwd hacluster

Host name resolution must be set in /etc/hosts

vserver4:
    echo "vserver4" > /etc/hostname
    echo "10.1.1.24   vserver4" >> /etc/hosts
    echo "10.1.1.25   vserver5" >> /etc/hosts
    exit
vserver5: 

    echo "10.1.1.24   vserver4" >> /etc/hosts
    echo "10.1.1.25   vserver5" >> /etc/hosts
    echo "vserver5" > /etc/hostname
    exit

In one node only!:

    server1=vserver4
    server2=vserver5
    cl_name=vservers

    pcs cluster auth $server1 $server2
    pcs cluster setup --name $cl_name $server1 $server2
    pcs cluster start --all
    pcs status

    [root@vserver4 ~]#     pcs status
    Cluster name: vservers
    WARNING: no stonith devices and stonith-enabled is not false
    WARNING: corosync and pacemaker node names do not match (IPs used in setup?)
    Last updated: Sun Oct 25 23:37:34 2015      Last change: 
    Stack: unknown
    Current DC: NONE
    0 nodes and 0 resources configured


    Full list of resources:


    PCSD Status:
      vserver4 member (vserver4): Online
      vserver5 member (vserver5): Online

    Daemon Status:
      corosync: active/disabled
      pacemaker: active/disabled
      pcsd: active/enabled

Check that cluster config is loaded as expected

    [root@vserver4 ~]# pcs cluster cib |grep vserver
    <cib crm_feature_set="3.0.10" validate-with="pacemaker-2.3" epoch="5" num_updates="8" admin_epoch="0" cib-last-written="Sun Oct 25 23:52:24 2015" update-origin="vserver5" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
            <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="vservers"/>
          <node id="1" uname="vserver4"/>
          <node id="2" uname="vserver5"/>
        <node_state id="2" uname="vserver5" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
        <node_state id="1" uname="vserver4" in_ccm="true" crmd="online" crm-debug-origin="do_state_transition" join="member" expected="member">
    [root@vserver4 ~]# grep vserver /etc/corosync/corosync.conf 
    cluster_name: vservers
            ring0_addr: vserver4
            ring0_addr: vserver5

Cercado

    [root@vserver4 ~]# pcs stonith list 
    fence_apc - Fence agent for APC over telnet/ssh
    fence_apc_snmp - Fence agent for APC, Tripplite PDU over SNMP
    pcs stonith describe fence_apc_snmp

You can check if your stonith is reachable and working:

    fence_apc_snmp --ip=stonith1 --action=monitor
    fence_apc_snmp --ip=stonith1 --action=monitor --community=escola2015
    fence_apc_snmp --ip=stonith1 --action=reboot --plug=6  --community=escola2015 --power-wait=5

Configure stonith resources as ssh (discarded as it is too slow)

    #pwd1=$(cat /root/pwd1)
    #pwd2=$(cat /root/pwd2)

    pcs stonith delete stonith1

    pcs cluster cib stonith_cfg


    pcs -f stonith_cfg stonith create stonith1 fence_apc ipaddr=10.1.1.3 login=vservers passwd=$pwd1 pcmk_host_list="vserver4 vserver5" pcmk_host_map="vserver4:4;vserver5:5"
    #pcs -f stonith_cfg stonith create  stonith2 fence_apc ipaddr=10.1.1.3 login=vserver5 passwd=$pwd2 pcmk_host_list="vserver5" pcmk_host_map="vserver5:3"
    pcs -f stonith_cfg property set stonith-enabled=false

    pcs cluster cib-push stonith_cfg

Configure stonith resource as snmp (we use this one)

    # Replace the ssh-based stonith with the SNMP fence agent (APC PDU).
    pcs stonith delete stonith1
    pcs cluster cib stonith_cfg
    # Fixed typo in pcmk_host_map: "verver5" -> "vserver5" (otherwise vserver5
    # is never mapped to PDU outlet 5 and cannot be fenced).
    pcs -f stonith_cfg stonith create stonith1 fence_apc_snmp params ipaddr=10.1.1.3 pcmk_host_list="vserver4,vserver5" pcmk_host_map="vserver4:4;vserver5:5" pcmk_host_check=static-list power_wait=5
    pcs cluster cib-push stonith_cfg

Activate stonith resource:

    pcs property set stonith-enabled=true

Tests (warning, will reboot nodes!)

    pcs cluster stop vserver5
    stonith_admin --reboot vserver5
    pcs cluster start --all    
    pcs cluster stop vserver4
    stonith_admin --reboot vserver4

While configuring cluster you may disable fencing:

    pcs property set stonith-enabled=false

Check stonith resource definition

    pcs stonith show --full

You'll find logs in:

    tail -f /var/log/pacemaker.log 

PACEMAKER DRBD

    # Load the drbd kernel module on every boot.
    echo drbd > /etc/modules-load.d/drbd.conf
    pcs resource create drbd-vdisks ocf:linbit:drbd drbd_resource=vdisks op monitor interval=60s
    # Fixed: the master/slave wrapper must reference the resource created above
    # ("drbd-vdisks"), not the non-existent "drbd-opt". master-max=2 gives the
    # dual-primary setup GFS2 needs.
    pcs resource master drbd-vdisks-clone drbd-vdisks master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 notify=true

dlm

Necesitamos bloqueo de clúster para el sistema de archivos gfs2

    pcs cluster cib dlm_cfg
    pcs -f dlm_cfg resource create dlm ocf:pacemaker:controld op monitor interval=60s
    pcs -f dlm_cfg resource clone dlm clone-max=2 clone-node-max=1
    pcs cluster cib-push dlm_cfg

Lvm de clúster

Configurar lvms de clúster

    systemctl disable lvm2-lvmetad.service
    systemctl disable lvm2-lvmetad.socket
    systemctl stop lvm2-lvmetad.service

    lvmconf --enable-cluster
    reboot

You should define the devices where lvm will look for lvm signatures in file /etc/lvm/lvm.conf:

    filter = ["a|sd.*|", "a|md.*|", "a|drbd.*|", "r|.*|"]

Configurar lvms de bloqueo de clúster

    pcs cluster cib clvmd_cfg
    pcs -f clvmd_cfg resource create clvmd ocf:heartbeat:clvm params daemon_options="timeout=30s" op monitor interval=60s
    pcs -f clvmd_cfg resource clone clvmd clone-max=2 clone-node-max=1
    pcs cluster cib-push clvmd_cfg

Crear volúmenes

En cada servidor:

    pvcreate /dev/drbd0

If we need to do cluster-wide actions we should use the -c flag:

    -cn ==> create a local (non-clustered) volume group
    -cy ==> create a cluster-wide (clustered) volume group
Now we can continue with cluster wide commands:
    vgcreate -cy vgcluster /dev/drbd0

We can check on the other node if the vg was created:

    [root@vserver5 ~]# vgs
      VG        #PV #LV #SN Attr   VSize   VFree  
      vg_data     2   1   0 wz--n- 558,79g   1,16g
      vgcluster   1   0   0 wz--nc 465,62g 465,62g

We keep some free space just in case we want to do io tests:

    lvcreate -l 97%FREE -n lvcluster1 vgcluster /dev/drbd0

And check it from the other server:

    [root@vserver4 ~]# lvs
      LV         VG        Attr       LSize   Pool            Origin          Data%  Meta%  Move Log Cpy%Sync Convert
      lv_data    vg_data   Cwi-aoC--- 465,63g [lv_cache_data] [lv_data_corig] 0,00   0,82            0,00            
      lvcluster1 vgcluster -wi-a----- 451,65g    

Cómo ejecutar el cercado desde el mismo drbd

In 'handlers' section in /etc/drbd.d/global_common.conf:

    fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
    after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
In 'disk' section:
    fencing resource-and-stonith;
This is the result:
    [root@vserver4 ~]# cat /etc/drbd.d/global_common.conf 
    global {
        usage-count yes;
    }

    common {
        handlers {
            split-brain "/usr/lib/drbd/notify-split-brain.sh root";
            fence-peer "/usr/lib/drbd/crm-fence-peer.sh";
            after-resync-target "/usr/lib/drbd/crm-unfence-peer.sh";
        }

        startup {
        }

        options {
        }

        disk {
            fencing resource-and-stonith;
        }

        net {
            protocol C;

            allow-two-primaries;
            after-sb-0pri discard-zero-changes;
            after-sb-1pri discard-secondary;
            after-sb-2pri disconnect;
        }
    }
Check that pacemaker and stonith are working:
    [root@vserver4 ~]# pcs property list
    Cluster Properties:
     cluster-infrastructure: corosync
     cluster-name: vservers
     dc-version: 1.1.13-3.fc22-44eb2dd
     have-watchdog: false
     stonith-enabled: true

Check again that stonith is enabled:

    pcs property set stonith-enabled=true

Restricciones

dlm and clvmd must be started in order:

# Stage the ordering constraint in a scratch CIB file, then push it atomically.
# Fixed: the scratch file name contained a stray "/" ("cons/traints_cfg") and
# the constraint was not created with -f, so the pushed file carried no change.
pcs cluster cib constraints_cfg
pcs -f constraints_cfg constraint order set drbd-vdisks-clone action=promote \
set dlm-clone clvmd-clone action=start \
sequential=true
pcs cluster cib-push constraints_cfg

DRBD

Configuración de ejemplo de drbd en clúster gfs2.

# Install DRBD, load the kernel module and enable the service at boot.
yum install drbd drbd-utils drbd-udev drbd-pacemaker -y
modprobe drbd
systemctl enable drbd
# We create drbd resources (/etc/drbd.d/...)
# Fixed: "create-dm" is not a drbdadm command; metadata is created with
# "create-md" for every resource.
drbdadm create-md bases
drbdadm create-md templates
drbdadm create-md grups
drbdadm up bases
drbdadm up templates
drbdadm up grups
drbdadm primary bases --force
drbdadm primary templates --force
drbdadm primary grups --force

Recursos dlm y clvm2

Lvms agrupados

pcs cluster cib locks_cfg
pcs -f locks_cfg resource create dlm ocf:pacemaker:controld op monitor interval=60s --group cluster_lock
pcs -f locks_cfg resource create clvmd ocf:heartbeat:clvm params daemon_options="timeout=30s" op monitor interval=60s  --group cluster_lock
pcs -f locks_cfg resource clone cluster_lock clone-max=2 clone-node-max=1 on-fail=restart 
pcs cluster cib-push locks_cfg

Sin lvms agrupados

pcs resource create dlm ocf:pacemaker:controld op monitor interval=60s 
pcs resource clone dlm clone-max=2 clone-node-max=1 on-fail=restart

Recursos DRBD

pcs cluster cib drbd_bases_cfg
pcs -f drbd_bases_cfg resource create drbd_bases ocf:linbit:drbd drbd_resource=bases op monitor interval=60s
pcs -f drbd_bases_cfg resource master drbd_bases-clone drbd_bases master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
pcs cluster cib-push drbd_bases_cfg

pcs cluster cib drbd_templates_cfg
pcs -f drbd_templates_cfg resource create drbd_templates ocf:linbit:drbd drbd_resource=templates op monitor interval=60s
pcs -f drbd_templates_cfg resource master drbd_templates-clone drbd_templates master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
pcs cluster cib-push drbd_templates_cfg

pcs cluster cib drbd_grups_cfg
pcs -f drbd_grups_cfg resource create drbd_grups ocf:linbit:drbd drbd_resource=grups op monitor interval=60s
pcs -f drbd_grups_cfg resource master drbd_grups-clone drbd_grups master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
pcs cluster cib-push drbd_grups_cfg

Sistema de archivos GFS2

mkfs.gfs2 -p lock_dlm -t vimet_cluster:bases -j 2 /dev/drbd10
mkfs.gfs2 -p lock_dlm -t vimet_cluster:templates -j 2 /dev/drbd11
mkfs.gfs2 -p lock_dlm -t vimet_cluster:grups -j 2 /dev/drbd30

Recursos GFS2

pcs resource create gfs2_bases Filesystem device="/dev/drbd10" directory="/vimet/bases" fstype="gfs2" "options=defaults,noatime,nodiratime,noquota" op monitor interval=10s on-fail=restart clone clone-max=2 clone-node-max=1
pcs resource create gfs2_templates Filesystem device="/dev/drbd11" directory="/vimet/templates" fstype="gfs2" "options=defaults,noatime,nodiratime,noquota" op monitor interval=10s on-fail=restart clone clone-max=2 clone-node-max=1
pcs resource create gfs2_grups Filesystem device="/dev/drbd30" directory="/vimet/grups" fstype="gfs2" "options=defaults,noatime,nodiratime,noquota" op monitor interval=10s on-fail=restart clone clone-max=2 clone-node-max=1

Servidor NFS 4

pcs cluster cib nfsserver_cfg
pcs -f nfsserver_cfg resource create nfs-daemon systemd:nfs-server \
nfs_shared_infodir=/nfsshare/nfsinfo nfs_no_notify=true \
--group nfs_server
pcs -f nfsserver_cfg resource create nfs-root exportfs \
clientspec=10.1.0.0/255.255.0.0 \
options=rw,async,wdelay,no_root_squash,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash \
directory=/vimet \
fsid=0 \
--group nfs_server
pcs cluster cib-push nfsserver_cfg
pcs resource clone nfs_server master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 on-fail=restart notify=true resource-stickiness=0

Exportaciones NFS 4

pcs cluster cib exports_cfg
pcs -f exports_cfg resource create nfs_bases exportfs \
clientspec=10.1.0.0/255.255.0.0 \
options=rw,async,wdelay,no_root_squash,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash directory=/vimet/bases \
fsid=11 \
clone master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 on-fail=restart notify=true resource-stickiness=0

pcs -f exports_cfg resource create nfs_templates exportfs \
clientspec=10.1.0.0/255.255.0.0 \
options=rw,async,wdelay,no_root_squash,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash directory=/vimet/templates \
fsid=21 \
clone master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 on-fail=restart notify=true resource-stickiness=0

pcs -f exports_cfg resource create nfs_grups exportfs \
clientspec=10.1.0.0/255.255.0.0 \
options=rw,async,wdelay,no_root_squash,no_subtree_check,sec=sys,rw,secure,no_root_squash,no_all_squash directory=/vimet/grups \
fsid=31 \
clone master-max=2 master-node-max=1 clone-max=2 clone-node-max=1 on-fail=restart notify=true resource-stickiness=0
pcs cluster cib-push exports_cfg

IPs flotantes

pcs resource create ClusterIPbases ocf:heartbeat:IPaddr2 ip=10.1.2.210 cidr_netmask=32 nic=nas:10 clusterip_hash=sourceip-sourceport-destport meta resource-stickiness=0 op monitor interval=5 clone globally-unique=true clone-max=2 clone-node-max=2 on-fail=restart resource-stickiness=0
pcs resource create ClusterIPtemplates ocf:heartbeat:IPaddr2 ip=10.1.2.211 cidr_netmask=32 nic=nas:11 clusterip_hash=sourceip-sourceport-destport meta resource-stickiness=0 op monitor interval=5 clone globally-unique=true clone-max=2 clone-node-max=2 on-fail=restart resource-stickiness=0
pcs resource create ClusterIPgrups ocf:heartbeat:IPaddr2 ip=10.1.2.212 cidr_netmask=32 nic=nas:30 clusterip_hash=sourceip-sourceport-destport meta resource-stickiness=0 op monitor interval=5 clone globally-unique=true clone-max=2 clone-node-max=2 on-fail=restart resource-stickiness=0
pcs resource create ClusterIPcnasbases ocf:heartbeat:IPaddr2 ip=10.1.1.28 cidr_netmask=32 nic=nas:110 clusterip_hash=sourceip-sourceport-destport meta resource-stickiness=0 op monitor interval=5 clone globally-unique=true clone-max=2 clone-node-max=2 on-fail=restart resource-stickiness=0
pcs resource create ClusterIPcnastemplates ocf:heartbeat:IPaddr2 ip=10.1.1.29 cidr_netmask=32 nic=nas:111 clusterip_hash=sourceip-sourceport-destport meta resource-stickiness=0 op monitor interval=5 clone globally-unique=true clone-max=2 clone-node-max=2 on-fail=restart resource-stickiness=0
pcs resource create ClusterIPcnasgrups ocf:heartbeat:IPaddr2 ip=10.1.1.30 cidr_netmask=32 nic=nas:130 clusterip_hash=sourceip-sourceport-destport meta resource-stickiness=0 op monitor interval=5 clone globally-unique=true clone-max=2 clone-node-max=2 on-fail=restart resource-stickiness=0

Restricciones

Orden de inicio y parada y restricciones

pcs constraint order \
    set stonith action=start \
    set cluster_lock-clone action=start \
    set nfs_server-clone action=start \
    require-all=true sequential=true \
    setoptions kind=Mandatory id=serveis

pcs constraint order \
    set drbd_bases-clone action=promote role=Master \
    set gfs2_bases-clone \
    set nfs_bases-clone \
    set ClusterIPcnasbases-clone action=start \
    set ClusterIPbases-clone action=start \
    require-all=true sequential=true \
    setoptions kind=Mandatory id=bases

pcs constraint order \
    set drbd_templates-clone action=promote role=Master \
    set gfs2_templates-clone \
    set nfs_templates-clone \
    set ClusterIPcnastemplates-clone action=start \
    set ClusterIPtemplates-clone action=start \
    require-all=true sequential=true \
    setoptions kind=Mandatory id=templates

pcs constraint order \
    set drbd_grups-clone action=promote role=Master \
    set gfs2_grups-clone \
    set nfs_grups-clone \
    set ClusterIPcnasgrups-clone action=start \
    set ClusterIPgrups-clone action=start \
    require-all=true sequential=true \
    setoptions kind=Mandatory id=grups

Restricciones de ubicación

pcs constraint colocation add \
    ClusterIPbases-clone with nfs_bases-clone INFINITY \
    id=colocate_bases

pcs constraint colocation add \
    ClusterIPtemplates-clone with nfs_templates-clone INFINITY \
    id=colocate_templates

pcs constraint colocation add \
    ClusterIPgrups-clone with nfs_grups-clone INFINITY \
    id=colocate_grups

pcs constraint colocation add \
    ClusterIPcnasbases-clone with nfs_bases-clone INFINITY \
    id=colocate_cnasbases

pcs constraint colocation add \
    ClusterIPcnastemplates-clone with nfs_templates-clone INFINITY \
    id=colocate_cnastemplates

# Keep the cnasgrups floating IP on the node serving the grups NFS export.
pcs constraint colocation add \
    ClusterIPcnasgrups-clone with nfs_grups-clone INFINITY \
    id=colocate_cnasgrups
# NOTE(review): removed six stray "set ..." lines that followed this command;
# they were an accidental re-paste of the "grups" ordering constraint defined
# earlier and would have made this colocation command invalid.