SaltStack in Practice, Part 1
To get more out of SaltStack, this section uses its configuration management to build a small-to-medium web architecture. The main components are:
- system initialization
- haproxy service
- keepalived service
- nginx
- php
- memcached
The states are organized in three layers (see the directory sketch below):
- system initialization: installing monitoring agents, tuning kernel parameters, setting DNS, and so on
- functional modules: installation and management of nginx, php, haproxy, and the like
- business modules: each business service is assembled by including the functional modules it needs
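For orientation, here is a rough sketch of the directory layout this part builds up (the nginx/php/memcached modules follow the same pattern and are covered later):
/srv/salt/base/
    top.sls
    init/               # system initialization states
        files/          # config files distributed by the init states
/srv/salt/prod/
    pkg/                # common build dependencies
    haproxy/            # haproxy installation
        files/
    keepalived/         # keepalived installation
        files/
    cluster/            # business layer: haproxy + keepalived service states
        files/
/srv/pillar/base/
    top.sls
    zabbix.sls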
1. Environment planning
192.168.56.101: master + minion, haproxy + keepalived, nginx + php
192.168.56.102: minion, memcached, haproxy + keepalived, nginx + php
192.168.56.100 is used as the virtual IP (VIP) that keepalived floats between the two nodes.
2. Environment setup
Two environments are used: base holds the system initialization states, and prod holds the production configuration management.
Configure the master (/etc/salt/master):
file_roots:
  base:
    - /srv/salt/base
  prod:
    - /srv/salt/prod
pillar_roots:
  base:
    - /srv/pillar/base
  prod:
    - /srv/pillar/prod
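The directories referenced above must exist, and the salt-master has to be restarted to pick up the new roots; on CentOS 7 that is roughly:
mkdir -p /srv/salt/base /srv/salt/prod /srv/pillar/base /srv/pillar/prod
systemctl restart salt-master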
3. Writing the system initialization module
The init states are kept in /srv/salt/base/init; the config files they distribute go in /srv/salt/base/init/files.
3.1 Configure DNS
[root@localhost init]# cat dns.sls
/etc/resolv.conf:
  file.managed:
    - source: salt://init/files/resolv.conf
    - user: root
    - group: root
    - mode: 644
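The referenced salt://init/files/resolv.conf is an ordinary resolver file that you supply yourself; a minimal sketch (the nameserver address is a placeholder for your own DNS server):
# /srv/salt/base/init/files/resolv.conf
nameserver 192.168.56.1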
3.2 Configure history timestamps
[root@localhost init]# cat history.sls
/etc/profile:
  file.append:
    - text:
      - export HISTTIMEFORMAT="%F %T `whoami` "
3.3 Configure command auditing
[root@localhost init]# cat cmd.sls
/etc/bashrc:
  file.append:
    - text:
      - export PROMPT_COMMAND='{ msg=$(history 1 | { read x y; echo $y; });logger "[euid=$(whoami)]":$(who am i):[`pwd`]"$msg"; }'
3.4 Tune kernel parameters: raise the maximum number of open files, enable IP forwarding, and avoid swapping as much as possible
[root@localhost init]# cat sysctl.sls
fs.file-max:
  sysctl.present:
    - value: 200000
net.ipv4.ip_forward:
  sysctl.present:
    - value: 1
vm.swappiness:
  sysctl.present:
    - value: 0
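A quick way to confirm the values actually landed on the minions, using Salt's sysctl execution module:
salt '*' sysctl.get vm.swappiness
salt '*' sysctl.get net.ipv4.ip_forward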
3.5 Configure the EPEL repository
[root@localhost init]# cat epel.sls
yum_repo_release:
  pkg.installed:
    - sources:
      - epel-release: http://mirrors.aliyun.com/epel/7/x86_64/epel-release-7-3.noarch.rpm
    - unless: rpm -qa | grep epel-release-7-3
3.6 Configure the Zabbix agent
[root@localhost init]# cat zabbix_agent.sls
zabbix-agent:
  pkg.installed:
    - name: zabbix22-agent
  file.managed:
    - name: /etc/zabbix_agentd.conf
    - source: salt://init/files/zabbix_agentd.conf
    - template: jinja
    - defaults:
        Zabbix_Server: {{ pillar['zabbix-agent']['Zabbix_Server'] }}
    - require:
      - pkg: zabbix-agent
  service.running:
    - name: zabbix-agentd
    - enable: True
    - watch:
      - file: zabbix-agent
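The zabbix_agentd.conf pushed out here is a Jinja template; the part that consumes the default defined above is the Server line (a fragment only, the remaining agent options are omitted):
# salt://init/files/zabbix_agentd.conf (fragment)
Server={{ Zabbix_Server }}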
The related pillar configuration for Zabbix:
[root@localhost init]# cat /srv/pillar/base/top.sls
base:
  '*':
    - zabbix
[root@localhost init]# cat /srv/pillar/base/zabbix.sls
zabbix-agent:
  Zabbix_Server: 192.168.56.101
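After adding the pillar data, refresh it and check that the minions see it:
salt '*' saltutil.refresh_pillar
salt '*' pillar.item zabbix-agent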
3.7 Pull the initialization states together
[root@localhost init]# cat base.sls
include:
  - init.dns
  - init.cmd
  - init.history
  - init.sysctl
  - init.epel
  - init.zabbix_agent
Finally, configure top.sls:
[root@localhost base]# cat /srv/salt/base/top.sls
base:
  '*':
    - init.base
Run salt '*' state.highstate test=True for a dry run; once the output looks right, apply it for real with salt '*' state.highstate.
4. HAProxy configuration
4.1 Common package dependencies (pkg)
[root@localhost init]# cat /srv/salt/prod/pkg/pkg-init.sls
pkg-init:
  pkg.installed:
    - names:
      - gcc
      - gcc-c++
      - glibc
      - make
      - autoconf
      - libjpeg-turbo
      - libjpeg-turbo-devel
      - libpng
      - libpng-devel
      - freetype
      - freetype-devel
      - libxml2
      - libxml2-devel
      - zlib
      - zlib-devel
      - libcurl
      - libcurl-devel
      - openssl
      - openssl-devel
      - swig
      - mysql
      - mysql-devel
4.2 Install HAProxy; the state files live in /srv/salt/prod/haproxy (the haproxy.init script can be taken from examples/haproxy.init in the HAProxy source tarball)
[root@localhost haproxy]# ls files/
haproxy-1.5.3.tar.gz haproxy.init
[root@localhost haproxy]# cat install.sls
include:
  - pkg.pkg-init

haproxy-install:
  file.managed:
    - name: /usr/local/src/haproxy-1.5.3.tar.gz
    - source: salt://haproxy/files/haproxy-1.5.3.tar.gz
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    - name: cd /usr/local/src && tar zxf haproxy-1.5.3.tar.gz && cd haproxy-1.5.3 && make TARGET=linux26 PREFIX=/usr/local/haproxy && make install PREFIX=/usr/local/haproxy
    - unless: test -d /usr/local/haproxy
    - require:
      - pkg: pkg-init
      - file: haproxy-install

/etc/init.d/haproxy:
  file.managed:
    - source: salt://haproxy/files/haproxy.init
    - mode: 755
    - user: root
    - group: root
    - require:
      - cmd: haproxy-install

haproxy-init:
  cmd.run:
    - name: chkconfig --add haproxy
    - unless: chkconfig --list | grep haproxy
    - require:
      - file: /etc/init.d/haproxy

net.ipv4.ip_nonlocal_bind:
  sysctl.present:
    - value: 1

haproxy-config-dir:
  file.directory:
    - name: /etc/haproxy
    - mode: 755
    - user: root
    - group: root
4.3 HAProxy service states (business layer)
The configuration file lives in /srv/salt/prod/cluster/files:
[root@localhost files]# cat haproxy-outside.cfg
global
    maxconn 100000
    chroot /usr/local/haproxy
    uid 99
    gid 99
    daemon
    nbproc 1
    pidfile /usr/local/haproxy/logs/haproxy.pid
    log 127.0.0.1 local3 info

defaults
    option http-keep-alive
    maxconn 100000
    mode http
    timeout connect 5000ms
    timeout client 50000ms
    timeout server 50000ms

listen stats                              # status monitoring page
    mode http
    bind 0.0.0.0:8888
    stats enable
    stats uri /haproxy-status
    stats auth haproxy:saltstack

frontend frontend_www_example_com         # frontend settings
    bind 192.168.56.100:80
    mode http
    option httplog
    log global
    default_backend backend_www_example_com

backend backend_www_example_com           # backend settings
    option forwardfor header X-REAL-IP
    option httpchk HEAD / HTTP/1.0
    balance source
    server web-node1 192.168.56.101:8080 check inter 2000 rise 30 fall 15
    server web-node2 192.168.56.102:8080 check inter 2000 rise 30 fall 15
The service-management SLS lives in /srv/salt/prod/cluster; the two 8080 backends above are the nginx + php nodes from the environment plan.
[root@localhost cluster]# cat haproxy-outside.sls
include:
  - haproxy.install

haproxy-service:
  file.managed:
    - name: /etc/haproxy/haproxy.cfg
    - source: salt://cluster/files/haproxy-outside.cfg
    - user: root
    - group: root
    - mode: 644
  service.running:
    - name: haproxy
    - enable: True
    - reload: True
    - require:
      - cmd: haproxy-install
      - cmd: haproxy-init
    - watch:
      - file: haproxy-service
To apply the HAProxy state, the top file now reads:
base:
  '*':
    - init.base
prod:
  '*':
    - cluster.haproxy-outside
salt '*' state.highstate test=True
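Once the dry run is clean and the state has been applied for real, the stats page configured above gives a quick sanity check, for example:
curl -u haproxy:saltstack http://192.168.56.101:8888/haproxy-status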
5. Keepalived configuration management
5.1 Install Keepalived; the state files live in /srv/salt/prod/keepalived
[root@localhost keepalived]# ls files/
keepalived-1.2.17.tar.gz keepalived.init keepalived.sysconfig
[root@localhost keepalived]# cat install.sls
include:
  - pkg.pkg-init

keepalived-install:
  file.managed:
    - name: /usr/local/src/keepalived-1.2.17.tar.gz
    - source: salt://keepalived/files/keepalived-1.2.17.tar.gz
    - user: root
    - group: root
    - mode: 755
  cmd.run:
    - name: cd /usr/local/src && tar zxf keepalived-1.2.17.tar.gz && cd keepalived-1.2.17 && ./configure --prefix=/usr/local/keepalived --disable-fwmark && make && make install
    - unless: test -d /usr/local/keepalived
    - require:
      - pkg: pkg-init
      - file: keepalived-install

/etc/sysconfig/keepalived:
  file.managed:
    - source: salt://keepalived/files/keepalived.sysconfig
    - mode: 644
    - user: root
    - group: root

/etc/init.d/keepalived:
  file.managed:
    - source: salt://keepalived/files/keepalived.init
    - user: root
    - group: root
    - mode: 755

keepalived-init:
  cmd.run:
    - name: chkconfig --add keepalived
    - unless: chkconfig --list | grep keepalived
    - require:
      - file: /etc/init.d/keepalived

/etc/keepalived:
  file.directory:
    - user: root
    - group: root
    - mode: 755
5.2 Keepalived service states (business layer)
The configuration file lives in /srv/salt/prod/cluster/files:
[root@localhost files]# cat haproxy-outside-keepalived.conf
! Configuration File for keepalived
global_defs {
    notification_email {
        saltstack@example.com
    }
    notification_email_from keepalived@example.com
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id {{ROUTEID}}
}

vrrp_instance haproxy_ha {
    state {{STATEID}}
    interface eth0
    virtual_router_id 36
    priority {{PRIORITYID}}
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.56.100
    }
}
The service-management SLS lives in /srv/salt/prod/cluster:
[root@localhost cluster]# cat haproxy-outside-keepalived.sls
include:
  - keepalived.install

keepalived-service:
  file.managed:
    - name: /etc/keepalived/keepalived.conf
    - source: salt://cluster/files/haproxy-outside-keepalived.conf
    - mode: 644
    - user: root
    - group: root
    - template: jinja
    {% if grains['fqdn'] == 'saltstack-node1.example.com' %}
    - ROUTEID: haproxy_ha
    - STATEID: MASTER
    - PRIORITYID: 150
    {% elif grains['fqdn'] == 'saltstack-node2.example.com' %}
    - ROUTEID: haproxy_ha
    - STATEID: BACKUP
    - PRIORITYID: 100
    {% endif %}
  service.running:
    - name: keepalived
    - enable: True
    - require:
      - cmd: keepalived-install
    - watch:
      - file: keepalived-service
The fqdn grain decides which node becomes MASTER and which BACKUP; if you are unsure what each minion reports, check it first as shown below.
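To see the fqdn value each minion actually reports:
salt '*' grains.item fqdn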
To apply the Keepalived state, the top file now reads:
base:
  '*':
    - init.base
prod:
  '*':
    - cluster.haproxy-outside
    - cluster.haproxy-outside-keepalived
salt '*' state.highstate test=True
Testing HAProxy + Keepalived failover
We can take one node down to simulate a failure and use ip add li to see which node currently holds the virtual IP.
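A minimal failover drill, assuming node1 currently holds the VIP:
# on saltstack-node1 (current VIP holder): simulate a failure
/etc/init.d/keepalived stop
# on saltstack-node2: the VIP 192.168.56.100 should now appear on eth0
ip add li eth0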