The earlier setup was a simple single-node etcd cluster; it now needs to be upgraded to a highly available cluster. This is implemented with keepalived + haproxy:
I. Add two new etcd nodes via Docker. The docker-compose file for one of the nodes is shown below; adjust it accordingly for the other nodes. Pay close attention to the settings called out in the comments, otherwise you will get errors or conflicts between the old and new clusters:
version: '2'
services:
  Etcd:
    image: bitnami/etcd:3.5.13-debian-12-r9
    container_name: etcd3.5
    environment:
      - TZ=Asia/Shanghai
      - ALLOW_NONE_AUTHENTICATION=yes
      - ETCD_NAME=etcd2
      - ETCD_ADVERTISE_CLIENT_URLS=http://10.0.xx.20:2379 # client URL this node advertises to the cluster
      - ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379 # URLs to listen on for client traffic
      - ETCD_INITIAL_ADVERTISE_PEER_URLS=http://10.0.xx.20:2380 # peer URL this node advertises to the cluster
      - ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380 # URLs to listen on for peer traffic
      - ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster # initial cluster token
      # NOTE: ETCD_NAME, the advertised URLs above, and this node's entry in ETCD_INITIAL_CLUSTER
      # must all refer to the same node and IP, otherwise startup fails with a peer-URL mismatch.
      - ETCD_INITIAL_CLUSTER=etcd2=http://10.0.xx.21:2380,etcd1=http://10.0.xx.20:2380,etcd3=http://10.0.xx.22:2380 # cluster members
      - ETCD_ACTIVE_ENDPOINTS=10.0.xx.21:2380,10.0.xx.20:2380,10.0.xx.22:2380
      # Only list nodes that are already active here: when the first new node joins there is only one,
      # and once data synchronization completes all of them can be listed.
      - ETCD_INITIAL_CLUSTER_STATE=existing # the first (old) node that already holds data uses "new"
      - ETCD_ELECTION_TIMEOUT=10000
    ports:
      - 2379:2379
      - 2380:2380
    volumes:
      - /xxxxxxxx/etcd/config/etcd.conf.yml:/opt/bitnami/etcd/conf/etcd.conf.yml
      - /xxxxxxxxx/etcd/data:/bitnami/etcd/data
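Before a node configured with ETCD_INITIAL_CLUSTER_STATE=existing starts for the first time, the new member normally has to be registered with the running cluster (the bitnami image may do this for you via ETCD_ACTIVE_ENDPOINTS). A minimal verification sketch, assuming etcdctl v3 is available on a host and using <old-node-ip>/<new-node-ip> as hypothetical placeholders for your actual addresses:
# on an existing node: register the new member before its first start (skip if the image does it automatically)
etcdctl --endpoints=http://<old-node-ip>:2379 member add etcd2 --peer-urls=http://<new-node-ip>:2380
# after the new container is up: confirm membership and cluster health
etcdctl --endpoints=http://<old-node-ip>:2379 member list
etcdctl --endpoints=http://<old-node-ip>:2379,http://<new-node-ip>:2379 endpoint health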
II. Install the haproxy load balancer:
On Ubuntu, install it as follows:
1) Install
sudo apt install software-properties-common
# add the PPA to install a specific newer version rather than the distro default
sudo add-apt-repository ppa:vbernat/haproxy-2.6 -y
sudo apt install haproxy -y
2) Back up the configuration file
mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.bak
3) Edit the configuration file
vim /etc/haproxy/haproxy.cfg
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# https://www.haproxy.org/download/2.1/doc/configuration.txt
# https://cbonte.github.io/haproxy-dconv/2.1/configuration.html
#
#---------------------------------------------------------------------
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.*    /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    # chroot    /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    # user      haproxy
    # group     haproxy
    # daemon
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option                  http-server-close
    option                  forwardfor except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# main frontend which proxies to the backends
#---------------------------------------------------------------------
frontend etcd-server
    mode tcp
    bind *:12379 # listen on port 12379
    # bind *:443 ssl # To be completed ....
    # the two acl lines below are left over from the stock HTTP example and have no effect in tcp mode
    acl url_static path_beg -i /static /images /javascript /stylesheets
    acl url_static path_end -i .jpg .gif .png .css .js
    default_backend etcd-server
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend etcd-server
    mode tcp # TCP mode
    balance roundrobin # round-robin load balancing
    # etcd backend servers, client port 2379
    server etcd2 10.0.xx.20:2379 check
    server etcd1 10.0.xx.21:2379 check
    server etcd3 10.0.xx.22:2379 check
4) Restart the service and enable it at boot:
sudo systemctl restart haproxy
sudo systemctl enable haproxy
sudo systemctl status haproxy
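After the restart it is worth confirming that the configuration is valid and that requests actually reach etcd through the proxy. A small sketch, assuming haproxy runs on this host and listens on 12379 as configured above:
sudo haproxy -c -f /etc/haproxy/haproxy.cfg # syntax check of the configuration file
haproxy -v # confirm the 2.6 build from the PPA is the one in use
curl http://127.0.0.1:12379/version # should return the etcd server version via the load balancer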
III. Install keepalived and provide a VIP for the k8s cluster to use
Keepalived is deployed with docker-compose this time; the YAML file is as follows:
version: '3'
services:
  keepalived:
    image: osixia/keepalived
    container_name: keepalived
    network_mode: "host"
    volumes:
      - /xxxxx/keepalived/keepalived.conf:/container/service/keepalived/assets/keepalived.conf
    cap_add:
      - NET_ADMIN
      - NET_BROADCAST
      - NET_RAW
    command: --copy-service
The keepalived.conf file is as follows:
global_defs {
    script_user root
    enable_script_security
}
vrrp_script chk_haproxy {
    script "/bin/bash -c 'if [[ $(netstat -nlp | grep 12379) ]]; then exit 0; else exit 1; fi'" # haproxy health check
    interval 2 # run the check every 2 seconds
    weight 11 # added to the priority while the check succeeds
}
vrrp_instance VI_1 {
    interface ens160 # bind to the node's network interface; this must be correct
    state MASTER # set to BACKUP on the backup nodes
    virtual_router_id 51 # must be identical on all nodes of the same virtual router group
    priority 100 # initial priority
    nopreempt # do not preempt: a recovered higher-priority node will not take the VIP back
    unicast_peer {
    }
    virtual_ipaddress {
        10.0.xx.27 # the VIP
    }
    authentication {
        auth_type PASS
        auth_pass password
    }
    track_script {
        chk_haproxy
    }
    notify "/container/service/keepalived/assets/notify.sh"
}
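After the keepalived container starts with this configuration, you can check whether the node currently holds the VIP and whether etcd is reachable through it. A quick sketch, assuming the interface, VIP, and haproxy port configured above:
ip addr show ens160 | grep 10.0.xx.27 # the VIP should be bound on the current MASTER
curl http://10.0.xx.27:12379/version # etcd answers through keepalived + haproxy
docker logs keepalived | tail -n 20 # VRRP state transitions (MASTER/BACKUP)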
Deploy one such container with Docker on each node, adjusting the configuration file as needed. Once deployment is complete, the original single-node etcd has been upgraded to a highly available cluster, and the k8s control-plane nodes only need to call the VIP + port.
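For example, in a kubeadm-managed control plane the etcd endpoint is set via the --etcd-servers flag of kube-apiserver (in /etc/kubernetes/manifests/kube-apiserver.yaml). A sketch of the relevant line, using the VIP and haproxy port from the configs above:
--etcd-servers=http://10.0.xx.27:12379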