Based on Docker Swarm
Recommendations
Install a time-synchronization service on every server; chrony or ntpd is recommended.
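For example, on a yum-based distribution (an assumption; use apt on Debian/Ubuntu) chrony can be installed and enabled like this:

```
yum install -y chrony
systemctl enable --now chronyd
chronyc tracking   # confirm the clock is actually synchronized
```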
Disk and node limits for reference:
Item | Limit |
---|---|
Maximum number of servers per cluster | Unlimited |
Maximum number of federated clusters | Unlimited |
Minimum number of servers | 2 |
Minimum drives per server when the server count is 1 | 4 |
Minimum drives per server when the server count is 2 or 3 | 2 |
Minimum drives per server when the server count is 4 | 1 |
Maximum drives per server | Unlimited |
Read quorum | N/2 |
Write quorum | N/2+1 |
For example, with 4 nodes and 1 drive each, N = 4×1 = 4, so the write quorum is 4/2+1 = 3 and the read quorum is 4/2 = 2: writes fail once fewer than 3 drives are online, and reads fail once fewer than 2 drives are online.
Mount a dedicated data disk, 1 TB or larger recommended, and use LVM so it is easy to extend later.
A MinIO cluster needs at least four nodes, so install at least 4 virtual machines. Hostnames are arbitrary, but a numeric suffix makes them easiest to tell apart.
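The steps below assume these four machines already form a Docker Swarm; a minimal sketch of creating one (the advertise address is minio1's IP from this guide, and the worker token is a placeholder):

```
# On minio1 (becomes the manager):
docker swarm init --advertise-addr 192.168.50.128
# On minio2-4, join with the token printed by the init command:
docker swarm join --token <worker-token> 192.168.50.128:2377
```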
Taking /dev/sdb as the data disk, run the following on all 4 servers:
```
vgcreate vg_minio /dev/sdb                   # create a volume group (initializes /dev/sdb as a PV)
lvcreate -n lv_minio -l 100%FREE vg_minio    # one logical volume over all free space
mkfs.xfs /dev/mapper/vg_minio-lv_minio       # XFS, the filesystem MinIO recommends
mkdir -p /minio/data
mount /dev/mapper/vg_minio-lv_minio /minio/data
```
Make the mount persistent across reboots by adding this line to /etc/fstab:

```
/dev/mapper/vg_minio-lv_minio /minio/data xfs defaults 0 0
```
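Verify the entry actually works before relying on it:

```
umount /minio/data   # unmount so the fstab entry itself is exercised
mount -a             # remounts everything listed in fstab; errors mean a bad entry
df -h /minio/data    # the volume should be mounted with its full size
```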
If these nodes only serve storage, it is best to move Docker's data directory onto the data disk as well.
Edit /etc/docker/daemon.json:

```
{
  "data-root": "/minio/data"
}
```

Then restart Docker:

```
systemctl daemon-reload
systemctl restart docker
```
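Confirm the new data root took effect:

```
docker info --format '{{ .DockerRootDir }}'   # should print /minio/data
```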
Label each swarm node so every MinIO service can be pinned to its own node:

```
docker node update minio1 --label-add minio1=true
docker node update minio2 --label-add minio2=true
docker node update minio3 --label-add minio3=true
docker node update minio4 --label-add minio4=true
```
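Check that the labels landed where expected:

```
docker node inspect minio1 --format '{{ .Spec.Labels }}'
```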
To enable HTTPS, generate a self-signed certificate (certgen is MinIO's certificate generator, github.com/minio/certgen):

```
certgen -host "localhost,minio*"
```

Move the resulting key and certificate into the directory that will be mounted into the containers:

```
mkdir -p /minio/certs
mv private.key public.crt /minio/certs
```
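Optionally inspect the certificate to confirm its SAN entries (assumes OpenSSL is available):

```
openssl x509 -in /minio/certs/public.crt -noout -text | grep -A1 'Subject Alternative Name'
```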
Write the MinIO stack file (the name minio-stack.yml used below is arbitrary):
```
version: '3.7'
services:
  minio1:
    image: minio/minio:latest
    hostname: minio1
    deploy:
      placement:
        constraints:
          - node.labels.minio1==true
    ports:
      - target: 9000
        published: 9000
        protocol: tcp
        mode: host
      - target: 9090
        published: 9090
        protocol: tcp
        mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - minio_distributed
    volumes:
      - /minio/data:/data
      # - /minio/certs:/root/.minio/certs   # uncomment for HTTPS; this is MinIO's default certs directory
      - /etc/localtime:/etc/localtime:ro
    environment:
      - MINIO_ROOT_USER=admin
      - MINIO_ROOT_PASSWORD=12345678
      - MINIO_DISTRIBUTED_MODE_ENABLED=yes
    command: minio server --console-address ":9090" http://minio{1...4}/data
    extra_hosts:
      - "minio1:192.168.50.128"
      - "minio2:192.168.50.129"
      - "minio3:192.168.50.131"
      - "minio4:192.168.50.130"
  minio2:
    image: minio/minio:latest
    hostname: minio2
    deploy:
      placement:
        constraints:
          - node.labels.minio2==true
    ports:
      - target: 9000
        published: 9000
        protocol: tcp
        mode: host
      - target: 9090
        published: 9090
        protocol: tcp
        mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - minio_distributed
    volumes:
      - /minio/data:/data
      # - /minio/certs:/root/.minio/certs   # uncomment for HTTPS; this is MinIO's default certs directory
      - /etc/localtime:/etc/localtime:ro
    environment:
      - MINIO_ROOT_USER=admin
      - MINIO_ROOT_PASSWORD=12345678
      - MINIO_DISTRIBUTED_MODE_ENABLED=yes
    command: minio server --console-address ":9090" http://minio{1...4}/data
    extra_hosts:
      - "minio1:192.168.50.128"
      - "minio2:192.168.50.129"
      - "minio3:192.168.50.131"
      - "minio4:192.168.50.130"
  minio3:
    image: minio/minio:latest
    hostname: minio3
    deploy:
      placement:
        constraints:
          - node.labels.minio3==true
    ports:
      - target: 9000
        published: 9000
        protocol: tcp
        mode: host
      - target: 9090
        published: 9090
        protocol: tcp
        mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - minio_distributed
    volumes:
      - /minio/data:/data
      # - /minio/certs:/root/.minio/certs   # uncomment for HTTPS; this is MinIO's default certs directory
      - /etc/localtime:/etc/localtime:ro
    environment:
      - MINIO_ROOT_USER=admin
      - MINIO_ROOT_PASSWORD=12345678
      - MINIO_DISTRIBUTED_MODE_ENABLED=yes
    command: minio server --console-address ":9090" http://minio{1...4}/data
    extra_hosts:
      - "minio1:192.168.50.128"
      - "minio2:192.168.50.129"
      - "minio3:192.168.50.131"
      - "minio4:192.168.50.130"
  minio4:
    image: minio/minio:latest
    hostname: minio4
    deploy:
      placement:
        constraints:
          - node.labels.minio4==true
    ports:
      - target: 9000
        published: 9000
        protocol: tcp
        mode: host
      - target: 9090
        published: 9090
        protocol: tcp
        mode: host
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
    networks:
      - minio_distributed
    volumes:
      - /minio/data:/data
      # - /minio/certs:/root/.minio/certs   # uncomment for HTTPS; this is MinIO's default certs directory
      - /etc/localtime:/etc/localtime:ro
    environment:
      - MINIO_ROOT_USER=admin
      - MINIO_ROOT_PASSWORD=12345678
      - MINIO_DISTRIBUTED_MODE_ENABLED=yes
    command: minio server --console-address ":9090" http://minio{1...4}/data
    extra_hosts:
      - "minio1:192.168.50.128"
      - "minio2:192.168.50.129"
      - "minio3:192.168.50.131"
      - "minio4:192.168.50.130"
networks:
  minio_distributed:
    driver: overlay
```
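Deploy the stack from a swarm manager (the file and stack names below are placeholders):

```
docker stack deploy -c minio-stack.yml minio
docker stack services minio    # each service should reach 1/1 replicas
```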
Next, deploy HAProxy in front of the S3 endpoint; write its stack file (e.g. haproxy-stack.yml). The published port and host mappings below are needed so HAProxy is reachable from clients and can resolve the MinIO hostnames:
```
version: '3.7'
services:
  haproxy:
    image: haproxy:2.5.7-alpine
    volumes:
      - /data/haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg
    ports:
      - target: 9000       # publish the S3 frontend; assumes this node is not
        published: 9000    # also a MinIO node, which already holds host port 9000
        protocol: tcp
        mode: host
    extra_hosts:                      # same mapping as the MinIO services,
      - "minio1:192.168.50.128"       # so the backend hostnames resolve inside the container
      - "minio2:192.168.50.129"
      - "minio3:192.168.50.131"
      - "minio4:192.168.50.130"
    deploy:
      mode: global
      placement:
        constraints:
          - node.labels.haproxy==true
```
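The placement constraint above expects a haproxy label, which has not been set yet; add it to whichever node should run HAProxy (the node name is a placeholder):

```
docker node update <node-name> --label-add haproxy=true
```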
Edit the HAProxy configuration /data/haproxy/haproxy.cfg to load-balance the S3 endpoint across the four nodes with health checking:
```
frontend minio_front
    mode tcp
    bind *:9000
    timeout client 60s
    default_backend minio_back

backend minio_back
    mode tcp
    balance roundrobin
    option tcp-check
    timeout connect 3s
    timeout server 60s
    server minio1 minio1:9000 check inter 1s
    server minio2 minio2:9000 check inter 1s
    server minio3 minio3:9000 check inter 1s
    server minio4 minio4:9000 check inter 1s
```
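With the configuration in place, deploy the stack and smoke-test the endpoint (file/stack names and the node IP are placeholders):

```
docker stack deploy -c haproxy-stack.yml haproxy
curl -f http://<haproxy-node-ip>:9000/minio/health/live   # HTTP 200 means HAProxy reaches a healthy MinIO
```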
Finally, configure Nginx to load-balance access to the web console:
```
http {
    .....
    upstream minio_cluster {
        hash $request_uri;
        server minio1:9090;
        server minio2:9090;
        server minio3:9090;
        server minio4:9090;
    }
    server {
        location / {
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $remote_addr;
            proxy_set_header X-Forwarded-Proto $scheme;
            ignore_invalid_headers off;
            proxy_buffering off;
            proxy_request_buffering off;
            client_body_buffer_size 10M;
            client_max_body_size 10G;
            proxy_read_timeout 300;
            proxy_next_upstream error timeout http_404;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_pass http://minio_cluster;
        }
        location ~ .*\.(gif|jpg|jpeg|bmp|png|ico|txt|js|css|mp4|svg|woff2|map|ts|tsx|html|text)$ {
            proxy_pass http://minio_cluster;
            expires 1h;
        }
    }
}
```
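To finish, validate and reload Nginx, then run an end-to-end check with the MinIO client (assumes mc is installed; the alias name and endpoint are placeholders):

```
nginx -t && nginx -s reload
mc alias set myminio http://<haproxy-node-ip>:9000 admin 12345678
mc admin info myminio    # reports per-node drive and uptime status for the cluster
```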