Using Ceph from OpenStack after installing it with cephadm

1. Ceph install

download the cephadm script

# curl --silent --remote-name --location https://github.com/ceph/ceph/raw/octopus/src/cephadm/cephadm
# chmod +x cephadm
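
As an optional sanity check, the downloaded script should at least print its usage:

# ./cephadm --help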

add repository (octopus)

# echo deb https://download.ceph.com/debian-octopus/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
# apt update

for quincy, use instead:

# echo deb https://download.ceph.com/debian-quincy/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
# apt update
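
Note that apt-key is deprecated on newer Ubuntu releases; a sketch of the keyring-file alternative (the target path /etc/apt/keyrings/ceph.gpg is an assumption, adjust to taste):

# wget -q -O- 'https://download.ceph.com/keys/release.asc' | gpg --dearmor -o /etc/apt/keyrings/ceph.gpg
# echo deb [signed-by=/etc/apt/keyrings/ceph.gpg] https://download.ceph.com/debian-quincy/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
# apt update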

install cephadm

# ./cephadm install

bootstrap the initial ceph cluster

192.168.0.101 is the address on the storage network.

# mkdir -p /etc/ceph
# cephadm bootstrap --mon-ip 192.168.0.101 --initial-dashboard-user admin --initial-dashboard-password XXXXX
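
bootstrap prints the dashboard URL and admin credentials when it finishes. To confirm the single-host cluster came up (ceph-common is only installed in the next step, so use the containerized client here):

# cephadm shell -- ceph -s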

install ceph-common

# apt install ceph-common

register the hosts with cephadm and label them

ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph01
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph02
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph03

ceph orch host add ceph01 192.168.0.101
ceph orch host add ceph02 192.168.0.102
ceph orch host add ceph03 192.168.0.103

ceph orch host label add ceph01 mon
ceph orch host label add ceph02 mon
ceph orch host label add ceph03 mon
ceph orch host label add ceph01 osd
ceph orch host label add ceph02 osd
ceph orch host label add ceph03 osd
ceph orch host label add ceph01 mgr

verify

# ceph orch host ls
HOST    ADDR           LABELS              STATUS
ceph01  192.168.0.101  _admin mon osd mgr
ceph02  192.168.0.102  mon osd
ceph03  192.168.0.103  mon osd
3 hosts in cluster

prepare the config file

# vim ceph_cluster_conf.yaml
service_type: mon
placement:
  count: 3
  label: mon
---
service_type: mgr
placement:
  count: 1
  label: mgr
---
service_type: osd
service_id: osd_using_paths
placement:
  hosts:
    - ceph01 
    - ceph02 
    - ceph03 
spec:
  data_devices:
    paths:
    - /dev/sdb
    - /dev/sdc

deploy

# ceph orch apply -i ceph_cluster_conf.yaml
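
If you want to see what the spec would change before committing to it, the orchestrator also accepts a dry run:

# ceph orch apply -i ceph_cluster_conf.yaml --dry-run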

Watching the output below, you can see the cluster gradually being built:

watch ceph -s
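
Per-daemon and per-device progress can also be checked via the orchestrator:

# ceph orch ps
# ceph orch device ls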

if you need to pull ceph.conf out of the cephadm container:

cephadm shell cat /etc/ceph/ceph.conf > /etc/ceph/ceph.conf

2. OpenStack configuration

prepare the pools for OpenStack

# ceph osd pool create volumes
# ceph osd pool create images
# ceph osd pool create backups
# ceph osd pool create vms

# rbd pool init volumes
# rbd pool init images
# rbd pool init backups
# rbd pool init vms
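
An optional check that the pools exist and are tagged with the rbd application (rbd pool init does the tagging):

# ceph osd pool ls detail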

prepare the keyrings for OpenStack

# ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' -o /etc/ceph/ceph.client.glance.keyring
# ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=images' -o /etc/ceph/ceph.client.cinder.keyring
# ceph auth get-or-create client.nova mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=vms, allow rx pool=images' -o /etc/ceph/ceph.client.nova.keyring
# ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups' -o /etc/ceph/ceph.client.cinder-backup.keyring 
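
The generated users and their caps can be reviewed afterwards:

# ceph auth get client.glance
# ceph auth get client.cinder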

change the OpenStack settings

The rbd-1 entry here is the Ceph backend.

# vi /etc/kolla/globals.yml

# Cinder
enable_cinder: "yes"
enable_cinder_backend_nfs: "yes"
skip_cinder_backend_check: True
cinder_enabled_backends:
  - name: rbd-1
  - name: linstor-drbd
  - name: nfs-1
  
# Cinder-Backup
enable_cinder_backup: "yes"
cinder_backup_driver: "nfs"
cinder_backup_share: "192.168.0.101:/nfs"

## ceph
glance_backend_ceph: "yes"
cinder_backend_ceph: "yes"
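
Since the nova keyrings are distributed below, you would typically also enable nova's ceph backend; a sketch of the corresponding globals.yml line (assuming your kolla-ansible release supports the variable):

## Nova (assumption: ephemeral disks on ceph are wanted here)
nova_backend_ceph: "yes"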

fix ceph.conf

Tabs in ceph.conf make the Ansible run fail, so delete them:

sed -i 's/\t//g' /etc/ceph/ceph.conf
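
To confirm nothing was missed (no output means the file is tab-free):

grep -P '\t' /etc/ceph/ceph.conf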

prepare the files for ceph

mkdir /etc/kolla/config
mkdir /etc/kolla/config/nova
mkdir /etc/kolla/config/glance
mkdir -p /etc/kolla/config/cinder/cinder-volume
mkdir /etc/kolla/config/cinder/cinder-backup

cp /etc/ceph/ceph.conf /etc/kolla/config/cinder/
cp /etc/ceph/ceph.conf /etc/kolla/config/nova/
cp /etc/ceph/ceph.conf /etc/kolla/config/glance/
cp /etc/ceph/ceph.client.glance.keyring /etc/kolla/config/glance/
cp /etc/ceph/ceph.client.nova.keyring /etc/kolla/config/nova/
cp /etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/nova/
cp /etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-volume/
cp /etc/ceph/ceph.client.cinder.keyring /etc/kolla/config/cinder/cinder-backup/
cp /etc/ceph/ceph.client.cinder-backup.keyring /etc/kolla/config/cinder/cinder-backup/
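
After the copies, the layout under /etc/kolla/config should look like this:

# find /etc/kolla/config -type f
/etc/kolla/config/cinder/ceph.conf
/etc/kolla/config/cinder/cinder-volume/ceph.client.cinder.keyring
/etc/kolla/config/cinder/cinder-backup/ceph.client.cinder.keyring
/etc/kolla/config/cinder/cinder-backup/ceph.client.cinder-backup.keyring
/etc/kolla/config/glance/ceph.conf
/etc/kolla/config/glance/ceph.client.glance.keyring
/etc/kolla/config/nova/ceph.conf
/etc/kolla/config/nova/ceph.client.nova.keyring
/etc/kolla/config/nova/ceph.client.cinder.keyring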

deploy

kolla-ansible -i ./multinode deploy
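
Once the deploy finishes, a quick end-to-end check (a sketch; the volume name test-vol is arbitrary and admin credentials are assumed to be sourced):

openstack volume create --size 1 test-vol
rbd -p volumes ls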