pacemaker and haproxy



pacemaker



Installation:

yum install -y pacemaker corosync

yum install -y crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm

cd /etc/corosync/

cp corosync.conf.example corosync.conf

vim corosync.conf ## edit the configuration file

totem {
        version: 2
        secauth: off
        threads: 0
        interface {
                ringnumber: 0
                bindnetaddr: 172.25.33.0    ## network segment to bind to
                mcastaddr: 226.94.1.33      ## multicast address
                mcastport: 5438             ## multicast port
                ttl: 1
        }
}
service {               ## added so that corosync starts pacemaker
        name: pacemaker
        ver: 0
}

scp corosync.conf server4:/etc/corosync/ ## copy the configuration to the other node

/etc/init.d/corosync start ## start the service on both nodes
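
Before adding resources, it is worth confirming that the two nodes actually see each other. A quick sanity check with the standard corosync/crmsh tools:

corosync-cfgtool -s ## ring status should report "no faults"

crm status ## both server1 and server4 should show as Online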

[root@server1 corosync]# crm configure show


[root@server1 corosync]# crm

crm(live)# configure

crm(live)configure# property stonith-enabled=false

crm(live)configure# commit

crm(live)configure# bye

[root@server1 corosync]# crm_verify -VL ## verify the cluster configuration

[root@server1 corosync]# crm

crm(live)# configure

crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.33.100 cidr_netmask=32 op monitor interval=30s ## add the VIP resource

crm(live)configure# commit

crm(live)configure# bye

[root@server1 corosync]#
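
To confirm the VIP actually came up, check the address list; it should appear on exactly one node:

ip addr show | grep 172.25.33.100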

Test:

[root@server1 corosync]# crm node standby ## put this node into standby

[root@server1 corosync]# crm node online

[root@server4 corosync]# crm_mon ## watch the cluster status


drbd configuration


Do the following on both server1 and server4


First add virtual disks of equal size to both nodes, so that the two nodes stay consistent.
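
A quick consistency check before building DRBD (this assumes the new disk shows up as /dev/vdb, which is what the resource file below expects):

fdisk -l /dev/vdb ## run on both nodes; the reported sizes must match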

tar zxf drbd-8.4.2.tar.gz

cd drbd-8.4.2

./configure --enable-spec --with-km

cp ~/drbd-8.4.2.tar.gz /root/rpmbuild/SOURCES/

rpmbuild -bb drbd.spec

yum install kernel-devel -y

rpmbuild -bb drbd-km.spec

cd /root/rpmbuild/RPMS/x86_64/ ## the rpm packages are generated in this directory

rpm -ivh * ## install them all

scp * server4: ## copy to the other node and install the packages there as well

cd /etc/drbd.d

vim sqldata.res ## edit the resource file

resource sqldata {
        meta-disk internal;
        device /dev/drbd1;
        syncer {
                verify-alg sha1;
        }
        on server1 {
                disk /dev/vdb;
                address 172.25.33.1:7789;
        }
        on server4 {
                disk /dev/vdb;
                address 172.25.33.4:7789;
        }
}

scp sqldata.res server4:/etc/drbd.d/ ## copy the file to the other machine

drbdadm create-md sqldata ## initialize the metadata (run on both nodes)

/etc/init.d/drbd start ## start on both machines
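
Right after the first start, both sides should be connected but still inconsistent; this can be checked with:

cat /proc/drbd ## expect cs:Connected ro:Secondary/Secondary ds:Inconsistent/Inconsistent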


drbdadm primary sqldata --force ## only on the primary node; force the initial sync
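
The initial sync takes a while; progress can be watched until both sides report UpToDate:

watch -n1 cat /proc/drbd ## wait for ds:UpToDate/UpToDate before creating the filesystem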


mkfs.ext4 /dev/drbd1 ## create the filesystem

mysql

Only the primary node may access the DRBD device


On the primary node

mount /dev/drbd1 /var/lib/mysql

chown mysql.mysql /var/lib/mysql

/etc/init.d/mysqld start

/etc/init.d/mysqld stop

umount /var/lib/mysql/

drbdadm secondary sqldata ## demote the primary to secondary


On the secondary node

drbdadm primary sqldata

mount /dev/drbd1 /var/lib/mysql

chown mysql.mysql /var/lib/mysql

/etc/init.d/mysqld start

/etc/init.d/mysqld stop

umount /var/lib/mysql/

drbdadm secondary sqldata ## demote back to secondary
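
At any point, the current role can be queried on either node:

drbdadm role sqldata ## prints e.g. Primary/Secondary or Secondary/Primary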

fence configuration

Check the fence configuration on both the physical host and the virtual machines.
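
A quick way to verify fencing from a guest before enabling stonith (this assumes fence_virtd is running on the physical host and /etc/cluster/fence_xvm.key has been copied to both virtual machines):

fence_xvm -o list ## should list vm1 and vm4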

Add the following policy:

[root@server4 ~]# crm configure show
node server1 \
        attributes standby="off"
node server4 \
        attributes standby="off"
primitive vip ocf:heartbeat:IPaddr2 \
        params ip="172.25.33.100" cidr_netmask="32" \
        op monitor interval="30s"
primitive vmfence stonith:fence_xvm \
        params pcmk_host_map="server1:vm1;server4:vm4" \ ## map cluster node names to VM domain names
        op monitor interval="1min"
property $id="cib-bootstrap-options" \
        stonith-enabled="true" \ ## enable fencing
        no-quorum-policy="ignore" \ ## ignore loss of quorum in this two-node cluster
        expected-quorum-votes="2" \
        dc-version="1.1.10-14.el6-368c726" \
        cluster-infrastructure="classic openais (with plugin)"

Test:

If the kernel crashes on a node, fencing power-cycles that host.
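
One way to simulate such a crash (run it on the node you expect to be fenced; it really goes down):

echo c > /proc/sysrq-trigger ## kernel panic on server1; server4 should fence it and take over the VIP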

[root@server1 ~]# crm
crm(live)# configure 
crm(live)configure# primitive DBdata ocf:linbit:drbd params drbd_resource=sqldata op monitor interval=1min
crm(live)configure# ms DBdataclone DBdata meta master-max=1 master-node-max=1 clone-max=2 clone-node-max=1 notify=true
crm(live)configure# commit
WARNING: DBdata: default timeout 20s for start is smaller than the advised 240
WARNING: DBdata: default timeout 20s for stop is smaller than the advised 100
WARNING: DBdata: action monitor not advertised in meta-data, it may not be supported by the RA
crm(live)configure# primitive DBfs ocf:heartbeat:Filesystem params device=/dev/drbd1 directory=/var/lib/mysql fstype=ext4
crm(live)configure# colocation fs_on_drbd inf: DBfs DBdataclone:Master
crm(live)configure# order DBfs-after-DBdata inf: DBdataclone:promote DBfs:start
crm(live)configure# commit
WARNING: DBfs: default timeout 20s for start is smaller than the advised 60
WARNING: DBfs: default timeout 20s for stop is smaller than the advised 60
crm(live)configure# primitive mysqlDB lsb:mysqld op monitor interval=30s
crm(live)configure# group mysqlservice vip DBfs mysqlDB
INFO: resource references in colocation:fs_on_drbd updated
INFO: resource references in order:DBfs-after-DBdata updated
crm(live)configure# commit
crm(live)configure# show
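
After committing and leaving the crm shell, a one-shot status check should show the mysqlservice group (vip, DBfs, mysqlDB) running on the node that holds the DRBD Master:

crm_mon -1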


Test:

Resources can be failed over freely between the nodes.
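
A simple failover drill (assuming the group currently runs on server1):

crm node standby server1 ## VIP, DRBD Master, filesystem and mysqld should all move to server4

crm node online server1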


haproxy basics

yum install haproxy -y

vim /etc/haproxy/haproxy.cfg

global
        maxconn         10000
        stats socket    /var/run/haproxy.stat mode 600 level admin
        log             127.0.0.1 local0
        uid             200
        gid             200
        chroot          /var/empty
        daemon
defaults
        mode            http
        log             global
        option          httplog
        option          dontlognull
        monitor-uri     /monitoruri
        maxconn         8000
        option          redispatch
        timeout connect 5s
        timeout server  5s
        timeout client  30s
        retries         2

        stats uri       /admin/stats
        stats auth      admin:westos
        stats refresh   5s
frontend public
        bind            *:80
        acl badhost src 172.25.33.250 
        acl write method PUT
        acl write method POST
        use_backend dynamic if write ## read/write split: write methods go to the dynamic backend
        #http-request deny if badhost
        #errorloc 403 http://172.25.33.4 if badhost  ## alternative: deny, then redirect the 403 error page
        redirect location http://172.25.33.4 if badhost  ## every request from badhost is redirected
        use_backend dynamic if { path_beg /images   }
        default_backend static

backend static
      balance   roundrobin
      server    web1 172.25.33.2:80  check inter 1000
backend dynamic
     balance      roundrobin
     server       web2 172.25.33.3:80  check inter 1000

/etc/init.d/haproxy reload ## reload the configuration file
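
The ACLs can be exercised from a client with curl (the addresses follow the config above; the last command only triggers the redirect when run from 172.25.33.250):

curl http://172.25.33.1/ ## GET -> backend static (web1)

curl -X POST http://172.25.33.1/ ## POST matches the write acl -> backend dynamic (web2)

curl -I http://172.25.33.1/ ## from 172.25.33.250: expect 302 with Location: http://172.25.33.4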

Test page:

http://172.25.33.1/admin/stats



