目标

3台主机,每台挂载1个系统盘和1个数据盘

创建主机

创建磁盘

qemu-img create -f qcow2 /data/vdisk/ceph{1,2,3}-1.img 16G

qemu-img create -f qcow2 /data/vdisk/ceph1-1.img 16G

基于已有虚机配置复制主机

``` shell
virt-clone -o kvm-bak -n kvm-ceph1 -f /data/kvm/images/kvm-ceph1.img
# 查询虚机列表
virsh list --all

 -     kvm-bak                        关闭
 -     kvm-ceph1                      关闭
 -     kvm-ceph2                      关闭
 -     kvm-ceph3                      关闭
 ```

#### 挂载磁盘
``` shell
# 编辑kvm-ceph1 
virsh edit kvm-ceph1
找到以下配置
<disk type='file' device='disk'>
  <driver name='qemu' type='raw'/>
  <source file='/data/kvm/images/kvm-ceph1.img'/>
  <target dev='vda' bus='virtio'/>
  <address type='pci' domain='0x0000' bus='0x00' slot='0x06' function='0x0'/>
</disk>
后面添加以下配置
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2' cache='none'/>
<source file='/data/vdisk/ceph1-1.img'/>
<target dev='vdb' bus='virtio'/>
</disk>
```

配置主机环境

配置网络,hosts,和hostname

# 修改hosts 添加kvm-ceph1
vi /etc/hosts
127.0.0.1 kvm-ceph1  localhost localhost.localdomain localhost4 localhost4.localdomain4
::1       kvm-ceph1  localhost localhost.localdomain localhost6 localhost6.localdomain6

# 修改hostname
hostnamectl set-hostname kvm-ceph1

# 编辑ip config
vi /etc/sysconfig/network-scripts/ifcfg-eth0
修改IPADDR,GATEWAY,NETMASK属性

预检

node节点安装

  • Ceph 节点上安装 NTP 服务
yum install ntp ntpdate ntp-doc -y
  • Ceph 节点上安装 SSH 服务器
yum install openssh-server -y
  • 创建用户
useradd -d /home/ceph_deploy -m ceph_deploy
passwd ceph_deploy
# 设置复杂密码
#确保各 Ceph 节点上新创建的用户都有 sudo 权限。
echo "ceph_deploy ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/ceph_deploy
sudo chmod 0440 /etc/sudoers.d/ceph_deploy
  • 开放端口
firewall-cmd --zone=public --add-port=6789/tcp --permanent
firewall-cmd --zone=public --add-port=6800-7300/tcp --permanent
firewall-cmd --reload
  • 禁用SELinux
sudo setenforce 0

在管理节点执行

  • 安装 ceph-deploy
cat <<EOF > /etc/yum.repos.d/ceph.repo
[Ceph]
name=Ceph packages x86_64
baseurl=http://mirrors.aliyun.com/ceph/rpm-infernalis/el7/x86_64
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
priority=1

[Ceph-noarch]
name=Ceph noarch packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-infernalis/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
priority=1

[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.aliyun.com/ceph/rpm-infernalis/el7/SRPMS
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.aliyun.com/ceph/keys/release.asc
priority=1
EOF

yum install ceph-deploy -y
  • 允许无密码 SSH 登录
ssh-keygen

cat <<EOF > ~/.ssh/config
Host kvm-ceph1
   Hostname kvm-ceph1
   User ceph_deploy
Host kvm-ceph2
   Hostname kvm-ceph2
   User ceph_deploy
Host kvm-ceph3
   Hostname kvm-ceph3
   User ceph_deploy
EOF

在/etc/hosts添加如下配置
10.115.223.201 kvm-ceph1
10.115.223.202 kvm-ceph2
10.115.223.203 kvm-ceph3

ssh-copy-id -i ~/.ssh/id_rsa.pub ceph_deploy@kvm-ceph1
ssh-copy-id -i ~/.ssh/id_rsa.pub ceph_deploy@kvm-ceph2
ssh-copy-id -i ~/.ssh/id_rsa.pub ceph_deploy@kvm-ceph3

安装集群

admin节点

# 用于存储安装过程生成的信息
mkdir ceph-cluster
cd ceph-cluster

ceph-deploy new kvm-ceph1 kvm-ceph2 kvm-ceph3
# 当前目录会生成3个文件分别为:ceph.conf  ceph-deploy-ceph.log  ceph.mon.keyring

# 把 Ceph 配置文件里的默认副本数从 3 改成 2
在ceph.conf -> [global]下添加如下内容
osd pool default size = 2

# 执行安装
ceph-deploy install 118 kvm-ceph1 kvm-ceph2 kvm-ceph3
# 118  为本机的主机名称

# 配置初始 monitor
ceph-deploy mon create-initial


#激活osd
ceph-deploy osd create  kvm-ceph1:/dev/vdb
ceph-deploy osd create  kvm-ceph2:/dev/vdb
ceph-deploy osd create  kvm-ceph3:/dev/vdb

验证

# 在kvm-ceph1节点操作
[root@kvm-ceph1 ~]#  ceph -s
cluster 92a37e08-bbc0-4a56-b9d6-e21b4f2077a1
health HEALTH_OK
monmap e1: 3 mons at {kvm-ceph1=10.115.223.201:6789/0,kvm-ceph2=10.115.223.202:6789/0,kvm-ceph3=10.115.223.203:6789/0}
    election epoch 8, quorum 0,1,2 kvm-ceph1,kvm-ceph2,kvm-ceph3
osdmap e16: 3 osds: 3 up, 3 in
    flags sortbitwise,require_jewel_osds
pgmap v58: 64 pgs, 1 pools, 0 bytes data, 0 objects
    322 MB used, 33436 MB / 33758 MB avail
          64 active+clean

错误解决

  • ImportError: No module named pkg_resources

解决:
执行yum install python-setuptools -y

  • Delta RPMs disabled because /usr/bin/applydeltarpm not installed

解决
yum install deltarpm -y

  • 不能创建osd
# 不能创建osd 并显示如下错误
[kvm-ceph3][ERROR ] RuntimeError: command returned non-zero exit status: 2
[ceph_deploy.osd][ERROR ] Failed to execute command: /usr/sbin/ceph-volume --cluster ceph lvm create --bluestore --data /dev/vdb2
[ceph_deploy][ERROR ] GenericError: Failed to create 1 OSDs

解决
``` shell

查看分区

fdisk -l

部分显示如下:

磁盘 /dev/mapper/ceph--205ecc88--3e81--479a--bb68--fde3cab99495-osd--block--c54f850d--0c9b--44b9--8f51--38267ce2f0cd:17.2 GB, 17175674880 字节,33546240 个扇区
Units = 扇区 of 1 * 512 = 512 bytes
扇区大小(逻辑/物理):512 字节 / 512 字节
I/O 大小(最小/最佳):512 字节 / 512 字节

删除

lvremove /dev/mapper/ceph--205ecc88--3e81--479a--bb68--fde3cab99495-osd--block--c54f850d--0c9b--44b9--8f51--38267ce2f0cd

显示

pvs
PV VG Fmt Attr PSize PFree
/dev/vdb ceph-205ecc88-3e81-479a-bb68-fde3cab99495 lvm2 a-- <16.00g <16.00g

删除

vgremove ceph-205ecc88-3e81-479a-bb68-fde3cab99495

删除

pvremove /dev/vdb