Starting with Redis 5, a cluster can be created directly with the redis-cli command; there is no longer any need to go through the hassle of installing a Ruby environment for the old redis-trib.rb tool.
Settings that need to change in each node's Redis config file:

port 7000
cluster-enabled yes
cluster-config-file nodes.7000.conf
cluster-node-timeout 5000
appendonly yes
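Put together, a minimal redis.conf for the 7000 node might look like the sketch below. The bind, dir, and daemonize values are assumptions added here (the ps output further down shows the instances running as background daemons on 0.0.0.0, which is consistent with these settings):

# minimal sketch of /usr/local/cluster/7000/redis.conf (assumed values)
bind 0.0.0.0                         # assumed: listen on all interfaces, as in the ps output below
port 7000
daemonize yes                        # assumed: run in the background so start.sh can launch all six
dir /usr/local/cluster/7000          # assumed: keep each node's data files in its own directory
cluster-enabled yes
cluster-config-file nodes.7000.conf
cluster-node-timeout 5000
appendonly yes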
Create a directory per node, copy in the stock config, and edit it:

mkdir /usr/local/cluster
cd /usr/local/cluster/
mkdir 7000 7001 7002 7003 7004 7005
cd 7000/
cp /www/server/redis/redis.conf ./
vim redis.conf

Apply the settings above, then copy the file to the other directories:

cp redis.conf ../7001/
In each copy, replace the port with that directory's own port. In vim:

:%s/7000/7001/g
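Editing six files by hand gets tedious; a small loop with sed can copy the config and rewrite the port in one go (a sketch, assuming the directory layout above):

for port in 7001 7002 7003 7004 7005; do
  cp /usr/local/cluster/7000/redis.conf /usr/local/cluster/$port/redis.conf
  # rewrites port, cluster-config-file, and dir in one pass
  sed -i "s/7000/$port/g" /usr/local/cluster/$port/redis.conf
done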
Starting the instances one by one is tedious, so write a script that starts them all at once: vim start.sh
#!/bin/bash
/www/server/redis/src/redis-server /usr/local/cluster/7000/redis.conf
/www/server/redis/src/redis-server /usr/local/cluster/7001/redis.conf
/www/server/redis/src/redis-server /usr/local/cluster/7002/redis.conf
/www/server/redis/src/redis-server /usr/local/cluster/7003/redis.conf
/www/server/redis/src/redis-server /usr/local/cluster/7004/redis.conf
/www/server/redis/src/redis-server /usr/local/cluster/7005/redis.conf
chmod +x start.sh
./start.sh
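A matching stop script is handy for teardown (a sketch; this stop.sh is not in the original setup, and SHUTDOWN NOSAVE skips the final persistence step):

#!/bin/bash
# stop.sh - shut down all six instances, the counterpart to start.sh
for port in 7000 7001 7002 7003 7004 7005; do
  redis-cli -p $port shutdown nosave
done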
[root@localhost cluster]# ps -ef | grep redis
root     10279     1  0 11:46 ?        00:00:01 /www/server/redis/src/redis-server 0.0.0.0:7000 [cluster]
root     10340     1  0 11:47 ?        00:00:01 /www/server/redis/src/redis-server 0.0.0.0:7001 [cluster]
root     10346     1  0 11:47 ?        00:00:01 /www/server/redis/src/redis-server 0.0.0.0:7002 [cluster]
root     10352     1  0 11:47 ?        00:00:01 /www/server/redis/src/redis-server 0.0.0.0:7003 [cluster]
root     10358     1  0 11:47 ?        00:00:01 /www/server/redis/src/redis-server 0.0.0.0:7004 [cluster]
root     10364     1  0 11:47 ?        00:00:01 /www/server/redis/src/redis-server 0.0.0.0:7005 [cluster]
root     10945 21050  0 11:58 pts/2    00:00:00 grep --color=auto redis
[root@localhost cluster]#
Create the cluster
redis-cli --cluster create 192.168.33.60:7000 192.168.33.60:7001 192.168.33.60:7002 192.168.33.60:7003 192.168.33.60:7004 192.168.33.60:7005 --cluster-replicas 1

[root@localhost cluster]# redis-cli --cluster create 192.168.33.60:7000 192.168.33.60:7001 192.168.33.60:7002 192.168.33.60:7003 192.168.33.60:7004 192.168.33.60:7005 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.33.60:7004 to 192.168.33.60:7000
Adding replica 192.168.33.60:7005 to 192.168.33.60:7001
Adding replica 192.168.33.60:7003 to 192.168.33.60:7002
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: 22df8c4f2763a3d40c5533893333608df1e0b792 192.168.33.60:7000
   slots:[0-5460] (5461 slots) master
M: 4bdf950bbcaa37ff8aa159894c169b4c9fa0b056 192.168.33.60:7001
   slots:[5461-10922] (5462 slots) master
M: 93a97e0e58375b38ff4639de2e597e1adb675b83 192.168.33.60:7002
   slots:[10923-16383] (5461 slots) master
S: f68d2dde12b03b84251d69006d845cdfeaeaeb88 192.168.33.60:7003
   replicates 93a97e0e58375b38ff4639de2e597e1adb675b83
S: 6e2feeb4ee991d69adae953d47932291bb174445 192.168.33.60:7004
   replicates 22df8c4f2763a3d40c5533893333608df1e0b792
S: c0c7a26a69b09cad5d78e78c8d0c7eb327fbf8f5 192.168.33.60:7005
   replicates 4bdf950bbcaa37ff8aa159894c169b4c9fa0b056
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
.
>>> Performing Cluster Check (using node 192.168.33.60:7000)
M: 22df8c4f2763a3d40c5533893333608df1e0b792 192.168.33.60:7000
   slots:[0-5460] (5461 slots) master
   1 additional replica(s)
S: f68d2dde12b03b84251d69006d845cdfeaeaeb88 192.168.33.60:7003
   slots: (0 slots) slave
   replicates 93a97e0e58375b38ff4639de2e597e1adb675b83
S: 6e2feeb4ee991d69adae953d47932291bb174445 192.168.33.60:7004
   slots: (0 slots) slave
   replicates 22df8c4f2763a3d40c5533893333608df1e0b792
S: c0c7a26a69b09cad5d78e78c8d0c7eb327fbf8f5 192.168.33.60:7005
   slots: (0 slots) slave
   replicates 4bdf950bbcaa37ff8aa159894c169b4c9fa0b056
M: 93a97e0e58375b38ff4639de2e597e1adb675b83 192.168.33.60:7002
   slots:[10923-16383] (5461 slots) master
   1 additional replica(s)
M: 4bdf950bbcaa37ff8aa159894c169b4c9fa0b056 192.168.33.60:7001
   slots:[5461-10922] (5462 slots) master
   1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
[root@localhost cluster]#
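The same health check that runs at the end of cluster creation can be re-run at any time against any node:

redis-cli --cluster check 192.168.33.60:7000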
Check the cluster state:

redis-cli -h 127.0.0.1 -c -p 7000 info replication
redis-cli -h 127.0.0.1 -c -p 7001 info replication
redis-cli -h 127.0.0.1 -c -p 7002 info replication
redis-cli -h 127.0.0.1 -c -p 7003 info replication
redis-cli -h 127.0.0.1 -c -p 7004 info replication
redis-cli -h 127.0.0.1 -c -p 7005 info replication
[root@localhost logs]# redis-cli -h 127.0.0.1 -c -p 7002 info replication
# Replication
role:master
connected_slaves:1
slave0:ip=192.168.33.60,port=7003,state=online,offset=1344,lag=0
master_replid:f1b66456b6204b293b39702faea1dee558929070
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:1344
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:1344
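Rather than querying each node one by one, CLUSTER NODES prints the whole topology (node IDs, master/slave roles, slot ranges) from any single node:

redis-cli -c -p 7000 cluster nodes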
Result: 7000, 7001, and 7002 are masters, and 7003, 7004, and 7005 are replicas. Now kill the master on 7000 and see what happens. Before the kill, 7004 is a replica of 7000:

[root@localhost logs]# redis-cli -h 127.0.0.1 -c -p 7004 info replication
# Replication
role:slave
master_host:192.168.33.60
master_port:7000
master_link_status:up
master_last_io_seconds_ago:4
master_sync_in_progress:0
slave_repl_offset:1330
slave_priority:100
slave_read_only:1
connected_slaves:0
master_replid:4d2c448c8477544dccfa20b248ce37351a725293
master_replid2:0000000000000000000000000000000000000000
master_repl_offset:1330
second_repl_offset:-1
repl_backlog_active:1
repl_backlog_size:1048576
repl_backlog_first_byte_offset:1
repl_backlog_histlen:1330
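The kill itself might look like this (a sketch; the PID is the 7000 instance's PID from the earlier ps output):

kill 10279   # PID of the redis-server listening on 7000, per ps -ef above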
After the kill, check again: 7004 has been promoted to master.

[root@localhost logs]# redis-cli -h 127.0.0.1 -c -p 7004 info replication
Set a value (here connected to 7003):

127.0.0.1:7003> set name wang
Read it back on 7002:

127.0.0.1:7002> get name
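Note that both prompts above come from redis-cli started with -c (cluster mode), which makes the client follow MOVED/ASK redirections between nodes; without -c, the same GET would return a MOVED error instead of the value when the key's slot lives on another node:

redis-cli -c -p 7002 get name   # -c: follow cluster redirections to the node that owns the slot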