Redis + Keepalived High Availability in Practice (tested and working)

192.168.2.180: redis1 (keepalived-master)
192.168.2.181: redis2 (keepalived-slave)
192.168.2.222: VIP
Install Redis (on both machines):
# http://download.redis.io/redis-stable.tar.gz
wget -c http://download.redis.io/releases/redis-3.0.7.tar.gz
tar zxf redis-3.0.7.tar.gz     
cd redis-3.0.7     
make     
cd     
\cp redis-3.0.7/src/redis-benchmark /usr/local/sbin/     
\cp redis-3.0.7/src/redis-check-aof /usr/local/sbin/     
\cp redis-3.0.7/src/redis-check-dump /usr/local/sbin/     
\cp redis-3.0.7/src/redis-cli /usr/local/sbin/     
\cp redis-3.0.7/src/redis-sentinel /usr/local/sbin/     
\cp redis-3.0.7/src/redis-server /usr/local/sbin/     
mkdir /etc/redis
mkdir -p /data/redis-6379/
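
As an optional sanity check after the build (assuming the copied binaries are reachable on the PATH, e.g. via /usr/local/sbin/):

redis-server --version    # should report v=3.0.7
redis-cli --version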
Install Keepalived (on both machines):
# http://www.keepalived.org/documentation.html
wget -c http://www.keepalived.org/software/keepalived-1.2.19.tar.gz     
tar zxf keepalived-1.2.19.tar.gz     
cd keepalived-1.2.19     
./configure --prefix=/usr/local/keepalived     
make     
make install     
cp /usr/local/keepalived/sbin/keepalived /usr/sbin/     
cp /usr/local/keepalived/etc/sysconfig/keepalived /etc/sysconfig/     
cp /usr/local/keepalived/etc/rc.d/init.d/keepalived /etc/init.d/
mkdir /etc/keepalived

Sentinel configuration file on both redis1 and redis2:
cat > /etc/redis/sentinel.conf <<eof
port 26379     
dir /tmp
# on redis2, use 192.168.2.181 here:
sentinel monitor mymaster 192.168.2.180 6379 2
sentinel down-after-milliseconds mymaster 30000
sentinel parallel-syncs mymaster 1     
sentinel failover-timeout mymaster 180000     
eof
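
After the sentinels are started (the startup step appears further down), one way to confirm they can see the master, using the port from the config above, is (a sketch, not part of the original walkthrough):

redis-cli -p 26379 sentinel get-master-addr-by-name mymaster    # should return 192.168.2.180 and 6379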
redis-server configuration file on Redis1:
cat > /etc/redis/redis-6379.conf <<eof
# maxmemory 268435456     
maxmemory 256mb     
daemonize yes     
pidfile /data/redis-6379/redis-6379.pid     
port 6379     
bind 0.0.0.0     
tcp-backlog 511     
timeout 0     

tcp-keepalive 0     
loglevel notice     
logfile /data/redis-6379/redis.log     
databases 16     
save 900 1     
save 300 10     
save 60 10000     
stop-writes-on-bgsave-error yes     
rdbcompression yes     
rdbchecksum yes     
dbfilename dumpredis-6379.rdb     
dir /data/redis-6379     
slave-serve-stale-data yes     
slave-read-only yes     
repl-diskless-sync no     
repl-diskless-sync-delay 5     
# repl-ping-slave-period 10     
# repl-timeout 60     
repl-disable-tcp-nodelay no     
# repl-backlog-size 1mb     
# repl-backlog-ttl 3600     
slave-priority 100     
# min-slaves-to-write 3     
# min-slaves-max-lag 10     

appendonly no     
appendfilename "appendonly.aof"     
appendfsync everysec     
no-appendfsync-on-rewrite no     
auto-aof-rewrite-percentage 100     
auto-aof-rewrite-min-size 64mb     
aof-load-truncated yes     
lua-time-limit 5000     
slowlog-log-slower-than 10000     
slowlog-max-len 128     
latency-monitor-threshold 0     
notify-keyspace-events ""     
hash-max-ziplist-entries 512     
hash-max-ziplist-value 64     
list-max-ziplist-entries 512     
list-max-ziplist-value 64     
set-max-intset-entries 512     
zset-max-ziplist-entries 128     
zset-max-ziplist-value 64     
hll-sparse-max-bytes 3000     
activerehashing yes     
client-output-buffer-limit normal 0 0 0     
client-output-buffer-limit slave 256mb 64mb 60     
client-output-buffer-limit pubsub 32mb 8mb 60     
hz 10     
aof-rewrite-incremental-fsync yes     
eof

redis-server configuration file on Redis2 (the slave):
cat > /etc/redis/redis-6379.conf <<eof
slaveof 192.168.2.180 6379     
# maxmemory 268435456     
maxmemory 256mb     
daemonize yes     
pidfile /data/redis-6379/redis-6379.pid     
port 6379     
bind 0.0.0.0     
tcp-backlog 511     
timeout 0     
tcp-keepalive 0     
loglevel notice     
logfile /data/redis-6379/redis.log     
databases 16     
save 900 1     
save 300 10     
save 60 10000     
stop-writes-on-bgsave-error yes     
rdbcompression yes     
rdbchecksum yes     
dbfilename dumpredis-6379.rdb     
dir /data/redis-6379     
slave-serve-stale-data yes     
slave-read-only yes     
repl-diskless-sync no     
repl-diskless-sync-delay 5 
   
# repl-ping-slave-period 10     
# repl-timeout 60     
repl-disable-tcp-nodelay no     
# repl-backlog-size 1mb     
# repl-backlog-ttl 3600     
slave-priority 100     
# min-slaves-to-write 3     
# min-slaves-max-lag 10     
appendonly no     
appendfilename "appendonly.aof"     
appendfsync everysec     
no-appendfsync-on-rewrite no     
auto-aof-rewrite-percentage 100     
auto-aof-rewrite-min-size 64mb     
aof-load-truncated yes     
lua-time-limit 5000     
slowlog-log-slower-than 10000     
slowlog-max-len 128     
latency-monitor-threshold 0     
notify-keyspace-events ""     
hash-max-ziplist-entries 512     
hash-max-ziplist-value 64     
list-max-ziplist-entries 512     
list-max-ziplist-value 64     
set-max-intset-entries 512     
zset-max-ziplist-entries 128     
zset-max-ziplist-value 64     
hll-sparse-max-bytes 3000     
activerehashing yes     
client-output-buffer-limit normal 0 0 0     
client-output-buffer-limit slave 256mb 64mb 60     
client-output-buffer-limit pubsub 32mb 8mb 60     
hz 10     
aof-rewrite-incremental-fsync yes     
eof

Keepalived configuration file on Redis1 (vim /etc/keepalived/keepalived.conf):

! Configuration File for keepalived
global_defs {
    router_id REDIS_HA
}
vrrp_script chk_redis {
    script "/etc/keepalived/scripts/redis_check.sh 127.0.0.1 6379"
    interval 2
    timeout 2
    fall 3
}
#vrrp_script chk_redis_2 {
#    script "/etc/keepalived/scripts/redis_check_2.sh 127.0.0.1 6379"
#    interval 2
#    timeout 2
#    fall 3
#}
vrrp_instance RE_1 {
    state MASTER
    #state BACKUP
    interface eth0
    virtual_router_id 60
    priority 110
    #nopreempt    # non-preemptive mode; must be enabled if the recovered MASTER should not take the VIP back
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.2.222/24 dev eth0
    }
    track_script {
        chk_redis
    }
    notify_fault /etc/keepalived/scripts/redis_fault.sh
    notify_stop /etc/keepalived/scripts/redis_stop.sh
}
Keepalived configuration file on Redis2 (vim /etc/keepalived/keepalived.conf):

! Configuration File for keepalived
global_defs {
    router_id REDIS_HA
}
vrrp_script chk_redis {
    script "/etc/keepalived/scripts/redis_check.sh 127.0.0.1 6379"
    interval 2
    timeout 30
    fall 30
}
vrrp_instance RE_1 {
    state BACKUP
    interface eth0
    virtual_router_id 60
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.2.222/24 dev eth0
    }
    track_script {
        chk_redis
    }
    # script arguments: <local ip> <master ip> <redis port>
    notify_master "/etc/keepalived/scripts/redis_master.sh 127.0.0.1 192.168.2.180 6379"
    notify_backup "/etc/keepalived/scripts/redis_backup.sh 127.0.0.1 192.168.2.180 6379"
    notify_fault /etc/keepalived/scripts/redis_fault.sh
    notify_stop /etc/keepalived/scripts/redis_stop.sh
}
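
Once keepalived is running on both nodes (startup is covered below), a quick way to see which node currently holds the VIP, using the interface and address from the configs above:

ip addr show eth0 | grep 192.168.2.222    # prints the VIP line only on the node that currently holds it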
Start redis-server:
redis-server /etc/redis/redis-6379.conf
tail /data/redis-6379/redis.log

Start redis-sentinel:
redis-sentinel /etc/redis/sentinel.conf
tail /data/redis-6379/redis-sentinel.log

Write the Redis check scripts (on both machines):
mkdir /etc/keepalived/scripts
cd /etc/keepalived/scripts

redis1:
vi redis_check.sh
#!/bin/bash
ALIVE=`/usr/local/bin/redis-cli -h $1 -p $2 PING`
LOGFILE="/var/log/keepalived-redis-check.log"
#echo "[CHECK]" >> $LOGFILE
#date >> $LOGFILE
if [ "$ALIVE"x = "PONG"x ]; then
    :
    # echo "Success: redis-cli -h $1 -p $2 PING $ALIVE" >> $LOGFILE 2>&1
    exit 0
else
    date >> $LOGFILE
    echo "Failed: redis-cli -h $1 -p $2 PING $ALIVE" >> $LOGFILE 2>&1
    /etc/init.d/keepalived stop
    exit 1
fi
vi redis_fault.sh
#!/bin/bash
LOGFILE=/var/log/keepalived-redis-state.log
echo "[fault]" >> $LOGFILE
date >> $LOGFILE
vi redis_stop.sh
#!/bin/bash
LOGFILE=/var/log/keepalived-redis-state.log
echo "[stop]" >> $LOGFILE
date >> $LOGFILE
redis2:
vi redis_check.sh
#!/bin/bash
ALIVE=`/usr/local/bin/redis-cli -h $1 -p $2 PING`
LOGFILE="/var/log/keepalived-redis-check.log"
#echo "[CHECK]" >> $LOGFILE
#date >> $LOGFILE
if [ "$ALIVE"x = "PONG"x ]; then
    :
    # echo "Success: redis-cli -h $1 -p $2 PING $ALIVE" >> $LOGFILE 2>&1
    exit 0
else
    date >> $LOGFILE
    echo "Failed: redis-cli -h $1 -p $2 PING $ALIVE" >> $LOGFILE 2>&1
    /etc/init.d/keepalived stop
    exit 1
    #  exit 0
fi
vi redis_fault.sh
#!/bin/bash
LOGFILE=/var/log/keepalived-redis-state.log
echo "[fault]" >> $LOGFILE
date >> $LOGFILE
vi redis_stop.sh
#!/bin/bash
LOGFILE=/var/log/keepalived-redis-state.log
echo "[stop]" >> $LOGFILE
date >> $LOGFILE
vi redis_master.sh
#!/bin/bash
REDISCLI="/usr/local/bin/redis-cli -h $1 -p $3"
LOGFILE="/var/log/keepalived-redis-state.log"
echo "[master]" >> $LOGFILE
date >> $LOGFILE
echo "Being master...." >> $LOGFILE 2>&1
echo "Run MASTER cmd ..." >> $LOGFILE 2>&1
#$REDISCLI SLAVEOF $2 $3 >> $LOGFILE
#sleep 10    # delay 10 s to let data finish syncing before cancelling replication
echo "Run SLAVEOF NO ONE cmd ..." >> $LOGFILE
$REDISCLI SLAVEOF NO ONE >> $LOGFILE 2>&1
vi redis_backup.sh
#!/bin/bash
REDISCLI="/usr/local/bin/redis-cli"
LOGFILE="/var/log/keepalived-redis-state.log"
echo "[backup]" >> $LOGFILE
date >> $LOGFILE
echo "Run SLAVEOF cmd ..." >> $LOGFILE
$REDISCLI SLAVEOF $2 $3 >> $LOGFILE 2>&1
# echo "Being slave...." >> $LOGFILE 2>&1
sleep 15    # delay 15 s to let data sync before the role exchange
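
Before bringing keepalived up, it is worth making the scripts executable and confirming that replication is already healthy; a quick check using the topology IPs from above (not part of the original walkthrough):

chmod +x /etc/keepalived/scripts/*.sh
redis-cli -h 192.168.2.180 -p 6379 info replication    # expect role:master, connected_slaves:1
redis-cli -h 192.168.2.181 -p 6379 info replication    # expect role:slave, master_link_status:up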
Start keepalived on both machines, then run ip addr: the VIP is bound to redis1.
Kill the Redis process on redis1 with killall -9 redis-server.
Run ip addr again: the VIP has moved to redis2, and the keepalived process on redis1 has been stopped.
Restart keepalived on redis1: the VIP does not move back, and if you check again you will find keepalived has shut itself down once more, because Redis on redis1 has not yet been restored.
Restore redis1 with redis-server /etc/redis/redis-6379.conf, then start keepalived on redis1 and the VIP automatically moves back to redis1.

Note: during testing, the VIP sometimes failed to move after redis1 was shut down. The cause was in redis_check.sh, which calls ALIVE=`/usr/local/bin/redis-cli -h $1 -p $2 PING`, while /usr/local/bin/ on redis2 did not contain redis-cli or the other Redis binaries (they had been copied to /usr/local/sbin/). Copying the binaries from /usr/local/sbin/ into the bin directory the script expects fixed it.
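
One way to avoid that kind of path mismatch is to let the check script resolve redis-cli from the PATH instead of hard-coding a directory. A minimal sketch (not from the original article; the log file and arguments follow the scripts above):

#!/bin/bash
# redis_check.sh (variant): resolve redis-cli from PATH instead of a fixed directory
REDISCLI=$(command -v redis-cli) || exit 1
ALIVE=$("$REDISCLI" -h "$1" -p "$2" PING)
LOGFILE="/var/log/keepalived-redis-check.log"
if [ "$ALIVE" = "PONG" ]; then
    exit 0
else
    date >> $LOGFILE
    echo "Failed: redis-cli -h $1 -p $2 PING returned '$ALIVE'" >> $LOGFILE
    /etc/init.d/keepalived stop
    exit 1
fi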
