一、安装mysql主从复制
1、主从复制原理
此处默认读者已了解主从复制原理:主库将数据变更写入二进制日志(binlog),从库的 I/O 线程拉取 binlog 写入本地中继日志(relay log),再由 SQL 线程重放中继日志,从而实现主从数据同步。
2、主从搭建步骤
1、新建主服务器容器实例3307
docker run -p 3307:3306 --name mysql-master \
-v /mydata/mysql-master/log:/var/log/mysql \
-v /mydata/mysql-master/data:/var/lib/mysql \
-v /mydata/mysql-master/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=root \
-d mysql:5.7
[root@localhost ~]# docker run -p 3307:3306 --name mysql-master \
> -v /mydata/mysql-master/log:/var/log/mysql \
> -v /mydata/mysql-master/data:/var/lib/mysql \
> -v /mydata/mysql-master/conf:/etc/mysql \
> -e MYSQL_ROOT_PASSWORD=root \
> -d mysql:5.7
8a0b88dc966d078c1acfdf45ff99e8b694b584b2fffa8d2a17221da28a85b9d6
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8a0b88dc966d mysql:5.7 "docker-entrypoint.s…" 12 seconds ago Up 9 seconds 33060/tcp, 0.0.0.0:3307->3306/tcp, :::3307->3306/tcp mysql-master
[root@localhost ~]#
2、进入/mydata/mysql-master/conf目录下新建my.cnf
vim my.cnf
my.cnf 中的内容如下。注意:配置项行内不能有多余的空格,否则容器重启时会启动失败
[mysqld]
## 设置server_id,同一局域网中需要唯一
server_id=101
## 指定不需要同步的数据库名称
binlog-ignore-db=mysql
## 开启二进制日志功能
log-bin=mall-mysql-bin
## 设置二进制日志使用内存大小(事务)
binlog_cache_size=1M
## 设置使用的二进制日志格式(mixed,statement,row)
binlog_format=mixed
## 二进制日志过期清理时间。默认值为0,表示不自动清理。
expire_logs_days=7
## 跳过主从复制中遇到的所有错误或指定类型的错误,避免slave端复制中断。
## 如:1062错误是指一些主键重复,1032错误是因为主从数据库数据不一致
slave_skip_errors=1062
[root@localhost ~]# cd /mydata/mysql-master/conf
[root@localhost conf]# pwd
/mydata/mysql-master/conf
[root@localhost conf]# ll
总用量 0
[root@localhost conf]# vim my.cnf
[root@localhost conf]# cat my.cnf
[mysqld]
## 设置server_id,同一局域网中需要唯一
server_id=101
## 指定不需要同步的数据库名称
binlog-ignore-db=mysql
## 开启二进制日志功能
log-bin=mall-mysql-bin
## 设置二进制日志使用内存大小(事务)
binlog_cache_size=1M
## 设置使用的二进制日志格式(mixed,statement,row)
binlog_format=mixed
## 二进制日志过期清理时间。默认值为0,表示不自动清理。
expire_logs_days=7
## 跳过主从复制中遇到的所有错误或指定类型的错误,避免slave端复制中断。
## 如:1062错误是指一些主键重复,1032错误是因为主从数据库数据不一致
slave_skip_errors=1062
[root@localhost conf]# pwd
/mydata/mysql-master/conf
[root@localhost conf]# ll
总用量 4
-rw-r--r--. 1 root root 699 4月 16 14:37 my.cnf
[root@localhost conf]#
3、修改完配置后重启master实例
docker restart mysql-master
[root@localhost conf]# docker restart mysql-master
mysql-master
[root@localhost conf]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8a0b88dc966d mysql:5.7 "docker-entrypoint.s…" 19 minutes ago Up 5 seconds 33060/tcp, 0.0.0.0:3307->3306/tcp, :::3307->3306/tcp mysql-master
[root@localhost conf]#
4、进入mysql-master容器
docker exec -it mysql-master /bin/bash
mysql -uroot -proot
注意:此处登录报了密码错误(Access denied)!
[root@localhost conf]# docker exec -it mysql-master /bin/bash
root@8a0b88dc966d:/# mysql -uroot -proot
mysql: [Warning] Using a password on the command line interface can be insecure.
ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES)
root@8a0b88dc966d:/# mysql -uroot -p
Enter password:
ERROR 1045 (28000): Access denied for user 'root'@'localhost' (using password: YES)
root@8a0b88dc966d:/# mysql -uroot -p
最终改用 SQLyog 客户端成功连接上了该实例
5、master容器实例内创建数据同步用户
创建一个叫slave的用户,密码是123456
CREATE USER 'slave'@'%' IDENTIFIED BY '123456';
给slave授予一些权限
GRANT REPLICATION SLAVE, REPLICATION CLIENT ON *.* TO 'slave'@'%';
6、新建从服务器容器实例3308
docker run -p 3308:3306 --name mysql-slave \
-v /mydata/mysql-slave/log:/var/log/mysql \
-v /mydata/mysql-slave/data:/var/lib/mysql \
-v /mydata/mysql-slave/conf:/etc/mysql \
-e MYSQL_ROOT_PASSWORD=root \
-d mysql:5.7
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8a0b88dc966d mysql:5.7 "docker-entrypoint.s…" About an hour ago Up 43 minutes 33060/tcp, 0.0.0.0:3307->3306/tcp, :::3307->3306/tcp mysql-master
[root@localhost ~]# docker run -p 3308:3306 --name mysql-slave \
> -v /mydata/mysql-slave/log:/var/log/mysql \
> -v /mydata/mysql-slave/data:/var/lib/mysql \
> -v /mydata/mysql-slave/conf:/etc/mysql \
> -e MYSQL_ROOT_PASSWORD=root \
> -d mysql:5.7
8d7e6259cbfb87796e0e181957108bea378cbee389c19477fcd65500bb13a133
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8d7e6259cbfb mysql:5.7 "docker-entrypoint.s…" 11 seconds ago Up 9 seconds 33060/tcp, 0.0.0.0:3308->3306/tcp, :::3308->3306/tcp mysql-slave
8a0b88dc966d mysql:5.7 "docker-entrypoint.s…" About an hour ago Up 43 minutes 33060/tcp, 0.0.0.0:3307->3306/tcp, :::3307->3306/tcp mysql-master
[root@localhost ~]#
7、进入/mydata/mysql-slave/conf目录下新建my.cnf
vim my.cnf
vim my.cnf中的内容
[mysqld]
## 设置server_id,同一局域网中需要唯一
server_id=102
## 指定不需要同步的数据库名称
binlog-ignore-db=mysql
## 开启二进制日志功能,以备Slave作为其它数据库实例的Master时使用
log-bin=mall-mysql-slave1-bin
## 设置二进制日志使用内存大小(事务)
binlog_cache_size=1M
## 设置使用的二进制日志格式(mixed,statement,row)
binlog_format=mixed
## 二进制日志过期清理时间。默认值为0,表示不自动清理。
expire_logs_days=7
## 跳过主从复制中遇到的所有错误或指定类型的错误,避免slave端复制中断。
## 如:1062错误是指一些主键重复,1032错误是因为主从数据库数据不一致
slave_skip_errors=1062
## relay_log配置中继日志
relay_log=mall-mysql-relay-bin
## log_slave_updates表示slave将复制事件写进自己的二进制日志
log_slave_updates=1
## slave设置为只读(具有super权限的用户除外)
read_only=1
[root@localhost ~]# cd /mydata/mysql-slave/conf
[root@localhost conf]# ll
总用量 0
[root@localhost conf]# vim my.cnf
[root@localhost conf]#
8、修改完配置后重启slave实例
docker restart mysql-slave
[root@localhost conf]# docker restart mysql-slave
mysql-slave
[root@localhost conf]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
8d7e6259cbfb mysql:5.7 "docker-entrypoint.s…" 12 minutes ago Up 17 seconds 33060/tcp, 0.0.0.0:3308->3306/tcp, :::3308->3306/tcp mysql-slave
8a0b88dc966d mysql:5.7 "docker-entrypoint.s…" About an hour ago Up 55 minutes 33060/tcp, 0.0.0.0:3307->3306/tcp, :::3307->3306/tcp mysql-master
[root@localhost conf]#
9、在主数据库中查看主从同步状态
show master status;
10、进入mysql-slave容器
docker exec -it mysql-slave /bin/bash
mysql -uroot -proot
[root@localhost conf]# docker exec -it mysql-slave /bin/bash
root@8d7e6259cbfb:/# mysql -uroot -proot
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 2
Server version: 5.7.36-log MySQL Community Server (GPL)
Copyright (c) 2000, 2021, Oracle and/or its affiliates.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql>
11、在从数据库中配置主从复制
change master to master_host='宿主机ip', master_user='slave', master_password='123456', master_port=3307, master_log_file='mall-mysql-bin.000001', master_log_pos=617, master_connect_retry=30;
主从复制命令参数说明
master_host:主数据库的IP地址;
master_port:主数据库的运行端口;
master_user:在主数据库创建的用于同步数据的用户账号;
master_password:在主数据库创建的用于同步数据的用户密码;
master_log_file:指定从数据库要复制数据的日志文件,通过查看主数据的状态,获取File参数;
master_log_pos:指定从数据库从哪个位置开始复制数据,通过查看主数据的状态,获取Position参数;
master_connect_retry:连接失败重试的时间间隔,单位为秒。
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> change master to master_host='192.168.174.136', master_user='slave', master_password='123456', master_port=3307, master_log_file='mall-mysql-bin.000001', master_log_pos=617, master_connect_retry=30;
Query OK, 0 rows affected, 2 warnings (0.06 sec)
mysql>
12、在从数据库中查看主从同步状态
show slave status \G
## 注意:\G 本身就是语句结束符,后面不要再加分号;多加的分号会在输出末尾额外提示 "ERROR: No query specified"(见下方输出,属无害提示)
mysql> show slave status \G;
*************************** 1. row ***************************
Slave_IO_State:
Master_Host: 192.168.174.136
Master_User: slave
Master_Port: 3307
Connect_Retry: 30
Master_Log_File: mall-mysql-bin.000001
Read_Master_Log_Pos: 617
Relay_Log_File: mall-mysql-relay-bin.000001
Relay_Log_Pos: 4
Relay_Master_Log_File: mall-mysql-bin.000001
Slave_IO_Running: No ## 这里是No说明主从复制还没开始
Slave_SQL_Running: No ## 这里是No说明主从复制还没开始
Replicate_Do_DB:
Replicate_Ignore_DB:
Replicate_Do_Table:
Replicate_Ignore_Table:
Replicate_Wild_Do_Table:
Replicate_Wild_Ignore_Table:
Last_Errno: 0
Last_Error:
Skip_Counter: 0
Exec_Master_Log_Pos: 617
Relay_Log_Space: 154
Until_Condition: None
Until_Log_File:
Until_Log_Pos: 0
Master_SSL_Allowed: No
Master_SSL_CA_File:
Master_SSL_CA_Path:
Master_SSL_Cert:
Master_SSL_Cipher:
Master_SSL_Key:
Seconds_Behind_Master: NULL
Master_SSL_Verify_Server_Cert: No
Last_IO_Errno: 0
Last_IO_Error:
Last_SQL_Errno: 0
Last_SQL_Error:
Replicate_Ignore_Server_Ids:
Master_Server_Id: 0
Master_UUID:
Master_Info_File: /var/lib/mysql/master.info
SQL_Delay: 0
SQL_Remaining_Delay: NULL
Slave_SQL_Running_State:
Master_Retry_Count: 86400
Master_Bind:
Last_IO_Error_Timestamp:
Last_SQL_Error_Timestamp:
Master_SSL_Crl:
Master_SSL_Crlpath:
Retrieved_Gtid_Set:
Executed_Gtid_Set:
Auto_Position: 0
Replicate_Rewrite_DB:
Channel_Name:
Master_TLS_Version:
1 row in set (0.02 sec)
ERROR:
No query specified
mysql>
13、在从数据库中开启主从同步
mysql> start slave;
Query OK, 0 rows affected (0.04 sec)
mysql>
14、再次查看从数据库状态。注意:下方输出中 Slave_IO_Running 仍为 Connecting,且 Last_IO_Errno 为 1045(连接主库被拒绝),说明从库此时尚未真正连上主库,需检查主库上 slave 用户的密码与权限;只有 Slave_IO_Running 和 Slave_SQL_Running 都为 Yes 时,主从同步才算建立成功
mysql> show slave status \G;
*************************** 1. row ***************************
Slave_IO_State: Connecting to master
Master_Host: 192.168.174.136
Master_User: slave
Master_Port: 3307
Connect_Retry: 30
Master_Log_File: mall-mysql-bin.000001
Read_Master_Log_Pos: 617
Relay_Log_File: mall-mysql-relay-bin.000001
Relay_Log_Pos: 4
Relay_Master_Log_File: mall-mysql-bin.000001
Slave_IO_Running: Connecting
Slave_SQL_Running: Yes
Replicate_Do_DB: Yes
Replicate_Ignore_DB:
Replicate_Do_Table:
Replicate_Ignore_Table:
Replicate_Wild_Do_Table:
Replicate_Wild_Ignore_Table:
Last_Errno: 0
Last_Error:
Skip_Counter: 0
Exec_Master_Log_Pos: 617
Relay_Log_Space: 154
Until_Condition: None
Until_Log_File:
Until_Log_Pos: 0
Master_SSL_Allowed: No
Master_SSL_CA_File:
Master_SSL_CA_Path:
Master_SSL_Cert:
Master_SSL_Cipher:
Master_SSL_Key:
Seconds_Behind_Master: NULL
Master_SSL_Verify_Server_Cert: No
Last_IO_Errno: 1045
Last_IO_Error: error connecting to master 'slave@192.168.174.136:3307' - retry-time: 30 retries: 4
Last_SQL_Errno: 0
Last_SQL_Error:
Replicate_Ignore_Server_Ids:
Master_Server_Id: 0
Master_UUID:
Master_Info_File: /var/lib/mysql/master.info
SQL_Delay: 0
SQL_Remaining_Delay: NULL
Slave_SQL_Running_State: Slave has read all relay log; waiting for more updates
Master_Retry_Count: 86400
Master_Bind:
Last_IO_Error_Timestamp: 220416 08:07:45
Last_SQL_Error_Timestamp:
Master_SSL_Crl:
Master_SSL_Crlpath:
Retrieved_Gtid_Set:
Executed_Gtid_Set:
Auto_Position: 0
Replicate_Rewrite_DB:
Channel_Name:
Master_TLS_Version:
1 row in set (0.00 sec)
ERROR:
No query specified
mysql>
15、主从复制测试
主机
CREATE DATABASE db01;
USE db01;
create table t1 (id int,name varchar(20));
insert into t1 values(1,'z3');
insert into t1 values(2,'l4');
select * from t1;
从机
USE db01;
select * from t1;
二、安装redis集群(大厂面试题第4季-分布式存储案例真题)
cluster(集群)模式-docker版 哈希槽分区进行亿级数据存储
面试题
1、1~2亿条数据需要缓存,请问如何设计这个存储案例
回答
单机单台100%不可能,肯定是分布式存储,用redis如何落地?
2、上述问题阿里P6~P7工程案例和场景设计类必考题目,一般业界有3种解决方案
一、哈希取余分区
2亿条记录就是2亿个k,v,我们单机不行必须要分布式多机,假设有3台机器构成一个集群,
用户每次读写操作都是根据公式:hash(key) % N个机器台数,计算出哈希值,
用来决定数据映射到哪一个节点上。
优点: 简单粗暴,直接有效,只需要预估好数据规划好节点,
例如3台、8台、10台,就能保证一段时间的数据支撑。使用Hash算法让固定的一部分请求落到同一台服务器上,
这样每台服务器固定处理一部分请求(并维护这些请求的信息),起到负载均衡+分而治之的作用。
缺点: 原来规划好的节点,进行扩容或者缩容就比较麻烦了,不管扩缩,每次数据变动导致节点有变动,
映射关系需要重新进行计算,在服务器个数固定不变时没有问题,如果需要弹性扩容或故障停机的情况下,原来
的取模公式就会发生变化:Hash(key)/3会变成Hash(key) /?。此时地址经过取余运算的结果将发生很大变
化,根据公式获取的服务器也会变得不可控。某个redis机器宕机了,由于台数数量变化,会导致hash取余全部
数据重新洗牌。
缺点归纳如下:
原来规划好的节点,进行扩容或者缩容就比较麻烦了,不管扩缩,每次数据变动导致节点有变动,
映射关系需要重新进行计算,在服务器个数固定不变时没有问题,如果需要弹性扩容或故障停机的情况下,
原来的取模公式就会发生变化:Hash(key)/3会变成Hash(key) /?。此时地址经过取余运算的结果将发生
很大变化,根据公式获取的服务器也会变得不可控。某个redis机器宕机了,由于台数数量变化,会导致
hash取余全部数据重新洗牌。
二、一致性哈希算法分区
1、是什么?
一致性Hash算法背景
一致性哈希算法是1997年由麻省理工学院提出的,设计目标是为了解决
分布式缓存数据变动和映射问题,某个机器宕机了,分母数量改变了,自然取余数不OK了。
2、能干嘛
提出一致性Hash解决方案。
目的是当服务器个数发生变动时,
尽量减少影响客户端到服务器的映射关系
3、3大步骤
1、算法构建一致性哈希环
一致性哈希环
一致性哈希算法必然有个hash函数并按照算法产生hash值,这个算法的所有可能哈希值会构成一个全量
集,这个集合可以称为一个hash空间[0,2^32-1],这个是一个线性空间,但是在算法中,我们通过适当
的逻辑控制将它首尾相连(0 = 2^32),这样让它逻辑上形成了一个环形空间。
它也是按照使用取模的方法,前面笔记介绍的节点取模法是对节点(服务器)的数量进行取模。而一致性
Hash算法是对2^32取模,简单来说,**一致性Hash算法将整个哈希值空间组织成一个虚拟的圆环,**如假设
某哈希函数H的值空间为0-2^32-1(即哈希值是一个32位无符号整形),整个哈希环如下图:整个空间按
**顺时针方向组织**,圆环的正上方的点代表0,0点右侧的第一个点代表1,以此类推,2、3、4、
……直到2^32-1,也就是说0点左侧的第一个点代表2^32-1,0和2^32-1在零点方向重合,我们把这个
由2^32个点组成的圆环称为Hash环。
2、服务器IP节点映射
节点映射
将集群中各个IP节点映射到环上的某一个位置。
将各个服务器使用Hash进行一个哈希,具体可以选择服务器的IP或主机名作为关键字进行哈希,这样每台
机器就能确定其在哈希环上的位置。假如4个节点NodeA、B、C、D,经过IP地址的哈希函数计算(hash(ip)),
使用IP地址哈希后在环空间的位置如下:
3、key落到服务器的落键规则
当我们需要存储一个kv键值对时,首先计算key的hash值,hash(key),将这个key使用相同的函数Hash计算
出哈希值并确定此数据在环上的位置,从此位置沿环顺时针“行走”,第一台遇到的服务器就是其应该定位到的
服务器,并将该键值对存储在该节点上。
如我们有Object A、Object B、Object C、Object D四个数据对象,经过哈希计算后,在环空间上的位置
如下:根据一致性Hash算法,数据A会被定位到Node A上,B被定位到Node B上,C被定位到Node C上,D被
定位到Node D上。
4、优点
1、一致性哈希算法的容错性
容错性
假设Node C宕机,可以看到此时对象A、B、D不会受到影响,只有C对象被重定位到Node D。一般的,在一致性
Hash算法中,如果一台服务器不可用,则受影响的数据仅仅是此服务器到其环空间中前一台服务器(即沿着逆时
针方向行走遇到的第一台服务器)之间数据,其它不会受到影响。简单说,就是C挂了,受到影响的只是B、C之
间的数据,并且这些数据会转移到D进行存储。
2、一致性哈希算法的扩展性
扩展性
数据量增加了,需要增加一台节点NodeX,X的位置在A和B之间,那受到影响的也就是A到X之间的数据,重新把A
到X的数据录入到X上即可,不会导致hash取余全部数据重新洗牌。
5、缺点
一致性哈希算法的数据倾斜问题
Hash环的数据倾斜问题
一致性Hash算法在服务节点太少时,容易因为节点分布不均匀而造成数据倾斜(被缓存的对象大部分集中缓存在
某一台服务器上)问题,
例如系统中只有两台服务器:
小总结
为了在节点数目发生改变时尽可能少的迁移数据
将所有的存储节点排列在首尾相接的Hash环上,每个key在计算Hash后会顺时针找到临近的存储节点存放。
而当有节点加入或退出时仅影响该节点在Hash环上顺时针相邻的后续节点。
优点
加入和删除节点只影响哈希环中顺时针方向的相邻的节点,对其他节点无影响。
缺点
数据的分布和节点的位置有关,因为这些节点不是均匀的分布在哈希环上的,所以数据在进行存储时达不到均匀
分布的效果。
三、哈希槽分区
是什么
1 为什么出现
哈希槽实质就是一个数组,数组[0,2^14 -1]形成hash slot空间。
2 能干什么 解决均匀分配的问题,在数据和节点之间又加入了一层,把这层称为哈希槽(slot),用于管理数据和节点之间的关系,现在就相当于节点上放的是槽,槽里放的是数据。
槽解决的是粒度问题,相当于把粒度变大了,这样便于数据移动。 哈希解决的是映射问题,使用key的哈希值来计算所在的槽,便于数据分配。
3 多少个hash槽 一个集群只能有16384个槽,编号0-16383(0-2^14-1)。这些槽会分配给集群中的所有主节点,分配策略没有要求。可以指定哪些编号的槽分配给哪个主节点。集群会记录节点和槽的对应关系。解决了节点和槽的关系后,接下来就需要对key求哈希值,然后对16384取余,余数是几key就落入对应的槽里。slot = CRC16(key) % 16384。以槽为单位移动数据,因为槽的数目是固定的,处理起来比较容易,这样数据移动问题就解决了。
哈希槽计算
Redis 集群中内置了 16384 个哈希槽,redis 会根据节点数量大致均等的将哈希槽映射到不同的节点。当需要在 Redis 集群中放置一个 key-value时,redis 先对 key 使用 crc16 算法算出一个结果,然后把结果对 16384 求余数,这样每个 key 都会对应一个编号在 0-16383 之间的哈希槽,也就是映射到某个节点上。如下代码,key之A 、B在Node2, key之C落在Node3上
3主3从redis集群扩缩容配置案例架构说明
见自己的processon笔记
开打步骤
1、3主3从redis集群配置
1、关闭防火墙+启动docker后台服务
systemctl start docker
[root@localhost ~]# systemctl start docker
[root@localhost ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
192.168.174.133:5000/zzyyubuntu 1.2 04ea4a10f57c 8 days ago 109MB
registry.cn-hangzhou.aliyuncs.com/testshanghai/myubuntu 1.3 8d4088598f0b 8 days ago 176MB
tomcat latest fb5657adc892 3 months ago 680MB
mysql 5.7 c20987f18b13 3 months ago 448MB
rabbitmq management 6c3c2a225947 4 months ago 253MB
registry latest b8604a3fe854 5 months ago 26.2MB
ubuntu latest ba6acccedd29 6 months ago 72.8MB
redis 6.0.8 16ecd2772934 17 months ago 104MB
billygoo/tomcat8-jdk8 latest 30ef4019761d 3 years ago 523MB
[root@localhost ~]#
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[root@localhost ~]#
2、新建6个docker容器redis实例
docker run -d --name redis-node-1 --net host --privileged=true -v /data/redis/share/redis-node-1:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6381
docker run -d --name redis-node-2 --net host --privileged=true -v /data/redis/share/redis-node-2:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6382
docker run -d --name redis-node-3 --net host --privileged=true -v /data/redis/share/redis-node-3:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6383
docker run -d --name redis-node-4 --net host --privileged=true -v /data/redis/share/redis-node-4:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6384
docker run -d --name redis-node-5 --net host --privileged=true -v /data/redis/share/redis-node-5:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6385
docker run -d --name redis-node-6 --net host --privileged=true -v /data/redis/share/redis-node-6:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6386
## --cluster-enabled yes 是否开启集群
如果运行成功,效果如下:
[root@localhost ~]# docker run -d --name redis-node-1 --net host --privileged=true -v /data/redis/share/redis-node-1:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6381
273391a8b73118e115c4f37bcb6a220b959b116ab0cb534b54ed6c4a8f25f1d8
(注:此处原始粘贴内容错乱,已按下文 docker ps 中 node-1 的容器 ID 还原其执行结果;node-2~node-6 的执行记录见下)
[root@localhost ~]#
[root@localhost ~]# docker run -d --name redis-node-2 --net host --privileged=true -v /data/redis/share/redis-node-2:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6382
c51f088b6280f04a59ee30cfb9c42401c28dbaab455d718aa238c83154d349a2
[root@localhost ~]#
[root@localhost ~]# docker run -d --name redis-node-3 --net host --privileged=true -v /data/redis/share/redis-node-3:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6383
80e661ede711d38a7e5739cc17b204fe146f7a6ee93240bb796707510cb19198
[root@localhost ~]#
[root@localhost ~]# docker run -d --name redis-node-4 --net host --privileged=true -v /data/redis/share/redis-node-4:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6384
2a9ec724cc0517cbb073976cb19d971b3936737cd197f58e6655a64cda40db35
[root@localhost ~]#
[root@localhost ~]# docker run -d --name redis-node-5 --net host --privileged=true -v /data/redis/share/redis-node-5:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6385
0cfc6b6044d84c33b73abd084c475613d2a88260e4ede133028a36f4eea25263
[root@localhost ~]#
[root@localhost ~]# docker run -d --name redis-node-6 --net host --privileged=true -v /data/redis/share/redis-node-6:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6386
63e0c4ab89d85ae2a3622ee3ccca517f26555b2e5aa9090624a8084cccb36c07
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
63e0c4ab89d8 redis:6.0.8 "docker-entrypoint.s…" 13 seconds ago Up 12 seconds redis-node-6
0cfc6b6044d8 redis:6.0.8 "docker-entrypoint.s…" 19 seconds ago Up 18 seconds redis-node-5
2a9ec724cc05 redis:6.0.8 "docker-entrypoint.s…" 19 seconds ago Up 19 seconds redis-node-4
80e661ede711 redis:6.0.8 "docker-entrypoint.s…" 20 seconds ago Up 19 seconds redis-node-3
c51f088b6280 redis:6.0.8 "docker-entrypoint.s…" 20 seconds ago Up 19 seconds redis-node-2
273391a8b731 redis:6.0.8 "docker-entrypoint.s…" 21 seconds ago Up 20 seconds redis-node-1
[root@localhost ~]#
命令分步解释
docker run 创建并运行docker容器实例
--name redis-node-6 容器名字
--net host 使用宿主机的IP和端口,默认
--privileged=true 获取宿主机root用户权限
-v /data/redis/share/redis-node-6:/data 容器卷,宿主机地址:docker内部地址
redis:6.0.8 redis镜像和版本号
--cluster-enabled yes 开启redis集群
--appendonly yes 开启持久化
--port 6386 redis端口号
3、进入容器redis-node-1并为6台机器构建集群关系
进入容器
docker exec -it redis-node-1 /bin/bash
构建主从关系
//注意,进入docker容器后才能执行一下命令,且注意自己的真实IP地址
redis-cli --cluster create 192.168.174.138:6381 192.168.174.138:6382 192.168.174.138:6383 192.168.174.138:6384 192.168.174.138:6385 192.168.174.138:6386 --cluster-replicas 1
--cluster-replicas 1 表示为每个master创建一个slave节点
## redis-cli 进入容器内部
## --cluster create 构建集群
[root@localhost ~]# docker exec -it redis-node-1 /bin/bash
root@localhost:/data# redis-cli --cluster create 192.168.174.138:6381 192.168.174.138:6382 192.168.174.138:6383 192.168.174.138:6384 192.168.174.138:6385 192.168.174.138:6386 --cluster-replicas 1
>>> Performing hash slots allocation on 6 nodes...
Master[0] -> Slots 0 - 5460
Master[1] -> Slots 5461 - 10922
Master[2] -> Slots 10923 - 16383
Adding replica 192.168.174.138:6385 to 192.168.174.138:6381
Adding replica 192.168.174.138:6386 to 192.168.174.138:6382
Adding replica 192.168.174.138:6384 to 192.168.174.138:6383
>>> Trying to optimize slaves allocation for anti-affinity
[WARNING] Some slaves are in the same host as their master
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-5460] (5461 slots) master
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[5461-10922] (5462 slots) master
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[10923-16383] (5461 slots) master
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
Can I set the above configuration? (type 'yes' to accept): yes
>>> Nodes configuration updated
>>> Assign a different config epoch to each node
>>> Sending CLUSTER MEET messages to join the cluster
Waiting for the cluster to join
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
一切OK的话,3主3从搞定
4、链接进入6381作为切入点,查看集群状态
链接进入6381作为切入点,查看节点状态
查看集群信息
cluster info
查看集群有哪些节点
cluster nodes
默认是6379,现在我们是集群环境了,要写对应的端口号
redis-cli -p 6381
从上图可以看出主从机的对应的挂载关系 注意:它的每次挂载的关系不是固定的,要看每次的实际挂载关系,也就是说我这次的案例是这种关系,下次不一定是6381—>6386
2、主从容错切换迁移案例
1、数据读写存储
启动6机构成的集群并通过exec进入 我们先进入redis-node-1
[root@localhost ~]# docker exec -it redis-node-1 /bin/bash
root@localhost:/data# redis-cli -p 6381 ## 单机环境连接
对6381新增两个key
127.0.0.1:6381> keys *
(empty array)
127.0.0.1:6381> set k1 v1
(error) MOVED 12706 192.168.174.138:6383
127.0.0.1:6381> set k2 v2
OK
127.0.0.1:6381> exit 退出
root@localhost:/data# exit 退出
exit
[root@localhost ~]#
疑问:为什么 k1 存不进去而 k2 能存进去?
因为当前已开启集群环境,而我们是按单机方式(不带 -c)连接的。集群按哈希槽(slot)存储数据,存 k1 时报错 (error) MOVED 12706 192.168.174.138:6383,
表示 k1 经 CRC16 计算后落在编号为 12706 的槽位,
而 6381 负责的槽位范围是 0 - 5460,12706 不在该范围内,所以这条写入不能由 6381 执行,
因此不应以单机方式连接,而应加 -c 参数以集群方式连接,让客户端自动完成重定向
防止路由失效加参数-c并新增两个key
[root@localhost ~]# docker exec -it redis-node-1 /bin/bash 从新进入容器
root@localhost:/data# redis-cli -p 6381 -c 集群环境进入
127.0.0.1:6381> FLUSHALL 清空刚才的数据
OK
127.0.0.1:6381> set k1 v1
-> Redirected to slot [12706] located at 192.168.174.138:6383 ##12706大于0 - 5460进入6383
OK
192.168.174.138:6383> set k2 v2 ## 转到了6383端口
-> Redirected to slot [449] located at 192.168.174.138:6381 ##449小于0 - 5460进入6381
OK
192.168.174.138:6381> set k3 v3 ## 转到了6381端口
OK
192.168.174.138:6381> set k4 v4
-> Redirected to slot [8455] located at 192.168.174.138:6382 ##8455大于0 - 5460进入6382
OK
192.168.174.138:6382> ## 转到了6382端口
加入参数-c 优化路由
查看集群信息
redis-cli --cluster check 192.168.174.138:6381
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6381
192.168.174.138:6381 (a4ec9f82...) -> 2 keys | 5461 slots | 1 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 5461 slots | 1 slaves.
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 5462 slots | 1 slaves.
[OK] 4 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
redis-cli -p 6381 -c 只要是集群连接,存储好的key ,任何一台机器都可以读取的到
2、容错切换迁移
1、主6381和从机切换,先停止主机6381
6381主机停了,对应的真实从机上位
6381作为1号主机分配的从机以实际情况为准,具体是几号机器就是几号
root@localhost:/data# redis-cli -p 6381 -c
127.0.0.1:6381> cluster nodes ##现在6381是master,6386是slave
dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384@16384 slave a23d63cc7a2d2927d6e3689d831b2b6107e3486b 0 1650198627017 2 connected
fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383@16383 master - 0 1650198628035 3 connected 10923-16383
bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385@16385 slave fc8e2091cd5dd5966865aa280a3cb05f06ea853d 0 1650198629058 3 connected
a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381@16381 myself,master - 0 1650198627000 1 connected 0-5460
a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382@16382 master - 0 1650198628000 2 connected 5461-10922
b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386@16386 slave a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 0 1650198626000 1 connected
127.0.0.1:6381> exit
root@localhost:/data# exit
exit
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
63e0c4ab89d8 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-6
0cfc6b6044d8 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-5
2a9ec724cc05 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-4
80e661ede711 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-3
c51f088b6280 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-2
273391a8b731 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-1
[root@localhost ~]# docker stop redis-node-1
redis-node-1
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
63e0c4ab89d8 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-6
0cfc6b6044d8 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-5
2a9ec724cc05 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-4
80e661ede711 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-3
c51f088b6280 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-2
[root@localhost ~]#
6381停了,我们进入6382
[root@localhost ~]# docker exec -it redis-node-2 bash
root@localhost:/data# redis-cli -p 6382 -c
127.0.0.1:6382> cluster nodes ## 可以看到还是六个节点,6381刚开始是master现在变fail 了,以前6386是slave 现在变master了
a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382@16382 myself,master - 0 1650198931000 2 connected 5461-10922
b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386@16386 master - 0 1650198933417 7 connected 0-5460
bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385@16385 slave fc8e2091cd5dd5966865aa280a3cb05f06ea853d 0 1650198932332 3 connected
fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383@16383 master - 0 1650198931245 3 connected 10923-16383
a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381@16381 master,fail - 1650198729507 1650198726000 1 disconnected
dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384@16384 slave a23d63cc7a2d2927d6e3689d831b2b6107e3486b 0 1650198931000 2 connected
127.0.0.1:6382>
127.0.0.1:6382> get k1
-> Redirected to slot [12706] located at 192.168.174.138:6383
"v1"
192.168.174.138:6383> get k2
-> Redirected to slot [449] located at 192.168.174.138:6386
"v2"
192.168.174.138:6386> get k3
"v3"
192.168.174.138:6386> get k4
-> Redirected to slot [8455] located at 192.168.174.138:6382
"v4"
192.168.174.138:6382> ## 数据还在
2、再次查看集群信息
6381宕机了,6386上位成为了新的master。
备注:本次脑图笔记6381为主下面挂从6386。
每次案例下面挂的从机以实际情况为准,具体是几号机器就是几号
3、先还原之前的3主3从
我们再把 6381 启动(复活),看看 6386 是不是还是 master。中间需要等待一会儿,让 docker 集群重新感知节点状态。
root@localhost:/data# [root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
63e0c4ab89d8 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-6
0cfc6b6044d8 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-5
2a9ec724cc05 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-4
80e661ede711 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-3
c51f088b6280 redis:6.0.8 "docker-entrypoint.s…" 2 hours ago Up 2 hours redis-node-2
[root@localhost ~]# docker start redis-node-1
redis-node-1
[root@localhost ~]#
可以看到6386的地位还是不变的还是master
192.168.174.138:6382> cluster nodes
a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382@16382 myself,master - 0 1650199579000 2 connected 5461-10922
b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386@16386 master - 0 1650199580000 7 connected 0-5460
bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385@16385 slave fc8e2091cd5dd5966865aa280a3cb05f06ea853d 0 1650199577000 3 connected
fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383@16383 master - 0 1650199580354 3 connected 10923-16383
a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381@16381 slave b0534a266761a3f22089587dfce8ad815b3cacc7 0 1650199581373 7 connected
dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384@16384 slave a23d63cc7a2d2927d6e3689d831b2b6107e3486b 0 1650199581000 2 connected
192.168.174.138:6382>
但是按照习惯我们还是想让6381当老大
先启6381
docker start redis-node-1
再停6386
docker stop redis-node-6
[root@localhost ~]# docker stop redis-node-6
redis-node-6
192.168.174.138:6382> cluster nodes ## 我们看到6381有变成master ,老大又回来了
a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382@16382 myself,master - 0 1650199914000 2 connected 5461-10922
b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386@16386 master,fail - 1650199877647 1650199873581 7 disconnected
bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385@16385 slave fc8e2091cd5dd5966865aa280a3cb05f06ea853d 0 1650199915471 3 connected
fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383@16383 master - 0 1650199915000 3 connected 10923-16383
a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381@16381 master - 0 1650199914448 8 connected 0-5460
dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384@16384 slave a23d63cc7a2d2927d6e3689d831b2b6107e3486b 0 1650199916532 2 connected
192.168.174.138:6382>
再启6386
docker start redis-node-6
[root@localhost ~]# docker start redis-node-6
redis-node-6
[root@localhost ~]#
192.168.174.138:6382> cluster nodes ## 6386 变成了slave ,小弟还是原来的小弟
a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382@16382 myself,master - 0 1650200056000 2 connected 5461-10922
b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386@16386 slave a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 0 1650200057000 8 connected
bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385@16385 slave fc8e2091cd5dd5966865aa280a3cb05f06ea853d 0 1650200058545 3 connected
fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383@16383 master - 0 1650200057531 3 connected 10923-16383
a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381@16381 master - 0 1650200056000 8 connected 0-5460
dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384@16384 slave a23d63cc7a2d2927d6e3689d831b2b6107e3486b 0 1650200057000 2 connected
192.168.174.138:6382>
主从机器分配情况以实际情况为准
4、查看集群状态
redis-cli --cluster check 自己IP:6381
[root@localhost ~]# redis-cli --cluster check 192.168.174.138:6381
192.168.174.138:6381 (a4ec9f82...) -> 2 keys | 5461 slots | 1 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 5461 slots | 1 slaves.
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 5462 slots | 1 slaves.
[OK] 4 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
[root@localhost ~]#
3、主从扩容案例
1、新建6387、6388两个节点+新建后启动+查看是否8节点
docker run -d --name redis-node-7 --net host --privileged=true -v /data/redis/share/redis-node-7:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6387
docker run -d --name redis-node-8 --net host --privileged=true -v /data/redis/share/redis-node-8:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6388
docker ps
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
63e0c4ab89d8 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 35 minutes redis-node-6
0cfc6b6044d8 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-5
2a9ec724cc05 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-4
80e661ede711 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-3
c51f088b6280 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-2
273391a8b731 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 42 minutes redis-node-1
[root@localhost ~]# docker run -d --name redis-node-7 --net host --privileged=true -v /data/redis/share/redis-node-7:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6387
abd72ff7a155abf16955803e6ee76a204e738b85377be84b1a94c572b532b5ac
[root@localhost ~]# docker run -d --name redis-node-8 --net host --privileged=true -v /data/redis/share/redis-node-8:/data redis:6.0.8 --cluster-enabled yes --appendonly yes --port 6388
96cc4c7230c859f0858bee17d1796193f9d735edc4b1b82b3a59d48a1213d8ef
[root@localhost ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
96cc4c7230c8 redis:6.0.8 "docker-entrypoint.s…" 4 seconds ago Up 3 seconds redis-node-8
abd72ff7a155 redis:6.0.8 "docker-entrypoint.s…" 19 seconds ago Up 18 seconds redis-node-7
63e0c4ab89d8 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 35 minutes redis-node-6
0cfc6b6044d8 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-5
2a9ec724cc05 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-4
80e661ede711 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-3
c51f088b6280 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 3 hours redis-node-2
273391a8b731 redis:6.0.8 "docker-entrypoint.s…" 3 hours ago Up 43 minutes redis-node-1
[root@localhost ~]#
2、进入6387容器实例内部
docker exec -it redis-node-7 /bin/bash
3、将新增的6387节点(空槽号)作为master节点加入原集群
将新增的6387作为master节点加入集群:redis-cli --cluster add-node 自己实际IP地址:6387 自己实际IP地址:6381
6387 就是将要作为master的新增节点;6381 就是原来集群节点里面的领路人,相当于6387拜6381的码头,从而找到组织加入集群
[root@localhost ~]# docker exec -it redis-node-7 /bin/bash
root@localhost:/data# redis-cli --cluster add-node 192.168.174.138:6387 192.168.174.138:6381
>>> Adding node 192.168.174.138:6387 to cluster 192.168.174.138:6381
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.174.138:6387 to make it join the cluster.
[OK] New node added correctly.
root@localhost:/data#
4、检查集群情况第1次
redis-cli --cluster check 真实ip地址:6381
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6381
192.168.174.138:6381 (a4ec9f82...) -> 2 keys | 5461 slots | 1 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 5461 slots | 1 slaves.
192.168.174.138:6387 (b8d70377...) -> 0 keys | 0 slots | 0 slaves.
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 5462 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-5460] (5461 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[10923-16383] (5461 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots: (0 slots) master
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[5461-10922] (5462 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
5、重新分派槽号
重新分派槽号命令:
redis-cli --cluster reshard IP地址:端口号
redis-cli --cluster reshard 192.168.174.138:6381
6、检查集群情况第2次
redis-cli --cluster check 真实ip地址:6381
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6381
192.168.174.138:6381 (a4ec9f82...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6387 (b8d70377...) -> 1 keys | 4096 slots | 0 slaves.
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
槽号分派说明
为什么6387是3个新的区间,以前的还是连续?因为全部重新分配的成本太高,所以前3家各自匀出来一部分:从6381/6382/6383三个旧节点分别匀出约1364个坑位给新节点6387
7、为主节点6387分配从节点6388
命令:redis-cli --cluster add-node ip:新slave端口 ip:新master端口 --cluster-slave --cluster-master-id 新主机节点ID
redis-cli --cluster add-node 192.168.174.138:6388 192.168.174.138:6387 --cluster-slave --cluster-master-id b8d7037782e7f7278a1ac528978c96213f377f9a
-------这个是6387的编号,按照自己实际情况
root@localhost:/data# redis-cli --cluster add-node 192.168.174.138:6388 192.168.174.138:6387 --cluster-slave --cluster-master-id b8d7037782e7f7278a1ac528978c96213f377f9a
>>> Adding node 192.168.174.138:6388 to cluster 192.168.174.138:6387
>>> Performing Cluster Check (using node 192.168.174.138:6387)
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
>>> Send CLUSTER MEET to node 192.168.174.138:6388 to make it join the cluster.
Waiting for the cluster to join
>>> Configure node as replica of 192.168.174.138:6387.
[OK] New node added correctly.
root@localhost:/data#
8、检查集群情况第3次
redis-cli --cluster check 192.168.174.138:6382
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6382
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6387 (b8d70377...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6381 (a4ec9f82...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6382)
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
S: db1ced0b3b786226edd4d0262a94d1ff8a7cc6d6 192.168.174.138:6388
slots: (0 slots) slave
replicates b8d7037782e7f7278a1ac528978c96213f377f9a
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
4、主从缩容案例
1、目的:6387和6388下线
2、检查集群情况1获得6388的节点ID
redis-cli --cluster check 192.168.174.138:6382
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6382
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6387 (b8d70377...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6381 (a4ec9f82...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6382)
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
S: db1ced0b3b786226edd4d0262a94d1ff8a7cc6d6 192.168.174.138:6388
slots: (0 slots) slave
replicates b8d7037782e7f7278a1ac528978c96213f377f9a
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
3、将6388删除
从集群中将4号从节点6388删除
命令:redis-cli --cluster del-node ip:从机端口 从机6388节点ID
redis-cli --cluster del-node 192.168.174.138:6388 db1ced0b3b786226edd4d0262a94d1ff8a7cc6d6
redis-cli --cluster check 192.168.174.138:6382
检查一下发现,6388被删除了,只剩下7台机器了。
root@localhost:/data# redis-cli --cluster del-node 192.168.174.138:6388 db1ced0b3b786226edd4d0262a94d1ff8a7cc6d6
>>> Removing node db1ced0b3b786226edd4d0262a94d1ff8a7cc6d6 from cluster 192.168.174.138:6388
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@localhost:/data#
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6382
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6387 (b8d70377...) -> 1 keys | 4096 slots | 0 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6381 (a4ec9f82...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6382)
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
4、将6387的槽号清空,重新分配,本例将清出来的槽号都给6381
以6381端口将整个集群的槽号重新分配,我们这里只是以6381为突破口而已
redis-cli --cluster reshard 192.168.174.138:6381
root@localhost:/data# redis-cli --cluster reshard 192.168.174.138:6381
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[1365-5460] (4096 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots:[0-1364],[5461-6826],[10923-12287] (4096 slots) master
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
How many slots do you want to move (from 1 to 16384)? 4096
What is the receiving node ID? a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
Please enter all the source node IDs.
Type 'all' to use all the nodes as source nodes for the hash slots.
Type 'done' once you entered all the source nodes IDs.
Source node #1: b8d7037782e7f7278a1ac528978c96213f377f9a
Source node #2: done
5、检查集群情况第二次
redis-cli --cluster check 192.168.174.138:6381
4096个槽位都指派给了6381,它变成了8192个槽位;相当于6387清出来的槽位全部给了6381(不然要分3次输入,这样一锅端)
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6381
192.168.174.138:6381 (a4ec9f82...) -> 2 keys | 8192 slots | 1 slaves. ## 6381有两个4096
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6387 (b8d70377...) -> 0 keys | 0 slots | 0 slaves. ## 6387的槽位被清空了
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 4 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-6826],[10923-12287] (8192 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
M: b8d7037782e7f7278a1ac528978c96213f377f9a 192.168.174.138:6387
slots: (0 slots) master
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
6、将6387删除
命令:redis-cli --cluster del-node ip:端口 6387节点ID
redis-cli --cluster del-node 192.168.174.138:6387 b8d7037782e7f7278a1ac528978c96213f377f9a
root@localhost:/data# redis-cli --cluster del-node 192.168.174.138:6387 b8d7037782e7f7278a1ac528978c96213f377f9a
>>> Removing node b8d7037782e7f7278a1ac528978c96213f377f9a from cluster 192.168.174.138:6387
>>> Sending CLUSTER FORGET messages to the cluster...
>>> Sending CLUSTER RESET SOFT to the deleted node.
root@localhost:/data#
7、检查集群情况第三次
redis-cli --cluster check 192.168.174.138:6381
root@localhost:/data# redis-cli --cluster check 192.168.174.138:6381
192.168.174.138:6381 (a4ec9f82...) -> 2 keys | 8192 slots | 1 slaves.
192.168.174.138:6383 (fc8e2091...) -> 1 keys | 4096 slots | 1 slaves.
192.168.174.138:6382 (a23d63cc...) -> 1 keys | 4096 slots | 1 slaves.
[OK] 4 keys in 3 masters.
0.00 keys per slot on average.
>>> Performing Cluster Check (using node 192.168.174.138:6381)
M: a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851 192.168.174.138:6381
slots:[0-6826],[10923-12287] (8192 slots) master
1 additional replica(s)
M: fc8e2091cd5dd5966865aa280a3cb05f06ea853d 192.168.174.138:6383
slots:[12288-16383] (4096 slots) master
1 additional replica(s)
S: b0534a266761a3f22089587dfce8ad815b3cacc7 192.168.174.138:6386
slots: (0 slots) slave
replicates a4ec9f8257c5bb8ec9c38ca0b41a902aa8d51851
M: a23d63cc7a2d2927d6e3689d831b2b6107e3486b 192.168.174.138:6382
slots:[6827-10922] (4096 slots) master
1 additional replica(s)
S: dde08f07b2179b0ef1a56aba8e85549db3c99db6 192.168.174.138:6384
slots: (0 slots) slave
replicates a23d63cc7a2d2927d6e3689d831b2b6107e3486b
S: bf472ad798438b1d78916341624d41c68fadc3c4 192.168.174.138:6385
slots: (0 slots) slave
replicates fc8e2091cd5dd5966865aa280a3cb05f06ea853d
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.
root@localhost:/data#
又变成3主3从了,3个M、3个S
暂无评论内容