Setting up a Ceph cluster
Disable firewalld and SELinux on all nodes
systemctl stop firewalld
systemctl disable firewalld
setenforce 0
vim /etc/selinux/config
SELINUX=disabled
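To make the same SELinux change non-interactively, a sed one-liner like the following works (assuming the file currently reads SELINUX=enforcing):
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config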
Add a domestic (Aliyun) Ceph yum repository
cat >/etc/yum.repos.d/ceph.repo<<EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-luminous/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
EOF
yum makecache
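To confirm the new repositories are active before installing anything:
yum repolist enabled | grep -i ceph    # should list the ceph and ceph-noarch repos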
Run the following on every node
useradd ceph-admin
echo 'ceph-admin' | passwd --stdin ceph-admin
echo "ceph-admin ALL = (root) NOPASSWD:ALL" > /etc/sudoers.d/ceph-admin
chmod 0440 /etc/sudoers.d/ceph-admin
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
systemctl reload sshd
sed -i 's/^Defaults.*requiretty/#&/' /etc/sudoers
cat >>/etc/hosts<<EOF
192.168.1.111 ceph1
192.168.1.112 ceph2
192.168.1.113 ceph3
EOF
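With the hosts file in place, a quick reachability check from any node helps catch address typos:
for h in ceph1 ceph2 ceph3; do ping -c 1 $h; done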
mkfs.xfs /dev/sdb
Run the following on the admin node
su - ceph-admin
ssh-keygen
ssh-copy-id ceph-admin@ceph1
ssh-copy-id ceph-admin@ceph2
ssh-copy-id ceph-admin@ceph3
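To verify that passwordless SSH works before handing the hosts to ceph-deploy:
for h in ceph1 ceph2 ceph3; do ssh ceph-admin@$h hostname; done    # should print each hostname without asking for a password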
Install ceph-deploy on the admin node
sudo yum install -y ceph-deploy
mkdir my-cluster
cd my-cluster
Deploy the nodes
ceph-deploy new ceph1 ceph2 ceph3
This creates the following files in the my-cluster directory:
ceph.conf
ceph-deploy-ceph.log
ceph.mon.keyring
sudo vim ~/my-cluster/ceph.conf
public network = 192.168.0.0/23
[osd]
osd_max_object_name_len = 256
osd_max_object_namespace_len = 64
rbd_default_features = 1
osd_pool_default_size = 3
filestore_xattr_use_omap = true
[mon]
mon_pg_warn_max_per_osd = 1000
ceph-deploy install ceph1 ceph2 ceph3
ceph-deploy mon create-initial
ceph-deploy admin ceph1 ceph2 ceph3
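At this point the monitors should have formed a quorum; a quick check from a node that received the admin keyring:
sudo ceph -s    # should report 3 monitors in quorum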
Create the OSDs on each node (repeat the zap/create pair for every disk to be used; the remaining nodes are shown in the sketch after these commands)
ceph-deploy --overwrite-conf config push ceph{1..3}
ceph-deploy disk zap ceph1 /dev/sdb
ceph-deploy gatherkeys ceph{1..3}
ceph-deploy osd create ceph1 --data /dev/sdb
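The two commands above only prepare ceph1. Assuming ceph2 and ceph3 also expose their data disk as /dev/sdb, the same pair is repeated for them and the result can be verified:
ceph-deploy disk zap ceph2 /dev/sdb
ceph-deploy osd create ceph2 --data /dev/sdb
ceph-deploy disk zap ceph3 /dev/sdb
ceph-deploy osd create ceph3 --data /dev/sdb
sudo ceph osd tree    # all three OSDs should be listed and up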
Start the MDS and MGR daemons
ceph-deploy mds create ceph1 ceph2 ceph3
ceph-deploy mgr create ceph1 ceph2 ceph3
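To confirm the new daemons registered with the cluster (run from a node holding the admin keyring):
sudo ceph -s         # the mgr line should show one active manager with two standbys
sudo ceph mds stat   # the MDS daemons stay in standby until a filesystem is created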
Enable the dashboard module for the browser UI
sudo chown -R ceph-admin /etc/ceph
ceph mgr module enable dashboard
The dashboard is then reachable at http://192.168.1.111:7000
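The dashboard runs on whichever node hosts the active mgr, so the URL may point at ceph2 or ceph3 instead; the active instance can be looked up with:
sudo ceph mgr services    # prints the URL of the running dashboard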
CephFS needs two pools, one for data and one for metadata
ceph osd pool create fs_data 128
ceph osd pool create fs_metadata 128
ceph osd lspools
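A rough sanity check on the PG count for this 3-OSD cluster: two pools of 128 PGs each at the configured replica size of 3 give (128 + 128) × 3 / 3 ≈ 256 PG copies per OSD, which is why mon_pg_warn_max_per_osd was raised to 1000 in ceph.conf earlier.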
Create a CephFS filesystem
ceph fs new cephfs fs_metadata fs_data
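To verify the filesystem exists and an MDS has gone active:
sudo ceph fs ls       # name: cephfs, metadata pool: fs_metadata, data pools: [fs_data]
sudo ceph mds stat    # one MDS should now be up:active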
Mounting CephFS from a client
mount -t ceph ceph1:6789,ceph2:6789,ceph3:6789:/ /mnt/mycephfs -o name=admin,secret=`ceph auth print-key client.admin`
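The mount point has to exist first, and on a client without the Ceph CLI the key can be stored in a file and passed via secretfile instead of on the command line (a sketch; /etc/ceph/admin.secret is just an example path):
mkdir -p /mnt/mycephfs
ceph auth print-key client.admin > /etc/ceph/admin.secret    # run on a cluster node, then copy the file to the client
mount -t ceph ceph1:6789,ceph2:6789,ceph3:6789:/ /mnt/mycephfs -o name=admin,secretfile=/etc/ceph/admin.secret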
Removing Ceph
ceph-deploy purge ceph1 ceph2 ceph3
ceph-deploy purgedata ceph1 ceph2 ceph3
rm -rf /var/lib/ceph
rm -rf /etc/ceph
rm -rf /var/run/ceph/
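The rm commands above need to run on every node; assuming passwordless SSH and sudo are still available, a loop from the admin node cleans up everywhere:
for h in ceph1 ceph2 ceph3; do ssh $h sudo rm -rf /var/lib/ceph /etc/ceph /var/run/ceph; done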