1. Setup Environment

2. Node Configuration

2.1. Ceph Node

# node1 (10.0.0.10)
network:
    version: 2
    ethernets:
        enp0s3:
            dhcp4: no
            addresses: [10.0.0.10/24]
            gateway4: 10.0.0.1
            nameservers:
                addresses: [8.8.8.8]
        enp0s8:
            dhcp4: no
            addresses: [192.168.0.150/24]
            nameservers:
                addresses: [8.8.8.8]

# node2 (10.0.0.20)
network:
    version: 2
    ethernets:
        enp0s3:
            dhcp4: no
            addresses: [10.0.0.20/24]
            gateway4: 10.0.0.1
            nameservers:
                addresses: [8.8.8.8]

# node3 (10.0.0.30)
network:
    version: 2
    ethernets:
        enp0s3:
            dhcp4: no
            addresses: [10.0.0.30/24]
            gateway4: 10.0.0.1
            nameservers:
                addresses: [8.8.8.8]
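
Once each node's netplan file has been edited (the exact filename under /etc/netplan/ depends on the image, so treat it as an assumption), the addresses can be applied and checked:

# sudo netplan apply
# ip addr show enp0s3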

3. Package Installation

3.1. Ceph Node

# sudo apt install ntp
# sudo apt install python
# sudo useradd -d /home/cephnode -m cephnode
# sudo passwd cephnode
Enter new UNIX password:
Retype new UNIX password:
passwd: password updated successfully

# echo "cephnode ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephnode
# sudo chmod 0440 /etc/sudoers.d/cephnode
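
A quick check, not part of the original steps, to confirm that the cephnode account really has passwordless sudo:

# su - cephnode
$ sudo whoami
root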

3.2. Deploy Node

# vim /etc/hosts
10.0.0.10 node1
10.0.0.20 node2
10.0.0.30 node3
# wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
# echo deb https://download.ceph.com/debian-luminous/ $(lsb_release -sc) main | sudo tee /etc/apt/sources.list.d/ceph.list
# sudo apt update
# sudo apt install ceph-deploy
# sudo useradd -d /home/cephdeploy -m cephdeploy
# sudo passwd cephdeploy
Enter new UNIX password:
Retype new UNIX password:
passwd: password updated successfully

# echo "cephdeploy ALL = (root) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/cephdeploy
# sudo chmod 0440 /etc/sudoers.d/cephdeploy
# login cephdeploy
$ ssh-keygen
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
...

$ ssh-copy-id cephnode@node1
$ ssh-copy-id cephnode@node2
$ ssh-copy-id cephnode@node3
$ vim ~/.ssh/config
Host node1
   Hostname node1
   User cephnode
Host node2
   Hostname node2
   User cephnode
Host node3
   Hostname node3
   User cephnode
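
With the keys copied and ~/.ssh/config in place, passwordless access from the cephdeploy account can be verified; this check is assumed, not taken from the original post:

$ ssh node1 hostname
$ ssh node2 hostname
$ ssh node3 hostname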

4. Storage Cluster Configuration

4.1. Deploy Node

# login cephdeploy
$ mkdir my-cluster
If a previous deployment attempt needs to be wiped before starting fresh, purge the Ceph packages, data, and keys first:

# login cephdeploy
$ cd ~/my-cluster
$ ceph-deploy purge node1 node2 node3
$ ceph-deploy purgedata node1 node2 node3
$ ceph-deploy forgetkeys
$ rm ceph.*

# login cephdeploy
$ cd ~/my-cluster
$ ceph-deploy new node1
$ ceph-deploy install node1 node2 node3
$ ceph-deploy mon create-initial
$ ceph-deploy admin node1 node2 node3
$ ceph-deploy mgr create node1
$ ceph-deploy osd create --data /dev/sdb node1
$ ceph-deploy osd create --data /dev/sdb node2
$ ceph-deploy osd create --data /dev/sdb node3
$ sudo ceph -s
  cluster:
    id:     20261612-97fc-4a45-bd81-0d9c9b445e00
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node1
    mgr: node1(active)
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   3.0 GiB used, 597 GiB / 600 GiB avail
    pgs:   
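
An optional check, not in the original walkthrough, to see how the three OSDs are distributed across the hosts:

$ sudo ceph osd tree
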
# login cephdeploy
$ cd ~/my-cluster
$ ceph-deploy mds create node1
$ sudo ceph -s
  cluster:
    id:     20261612-97fc-4a45-bd81-0d9c9b445e00
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node1
    mgr: node1(active)
    osd: 3 osds: 3 up, 3 in

  data:
    pools:   0 pools, 0 pgs
    objects: 0  objects, 0 B
    usage:   3.0 GiB used, 597 GiB / 600 GiB avail
    pgs:  
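
The metadata server created above can also be checked directly; this is an assumed extra step, not part of the original post:

$ sudo ceph mds stat
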
# login cephdeploy
$ cd ~/my-cluster
$ ceph-deploy rgw create node1
$ sudo ceph -s 
  cluster:
    id:     20261612-97fc-4a45-bd81-0d9c9b445e00
    health: HEALTH_OK

  services:
    mon: 1 daemons, quorum node1
    mgr: node1(active)
    osd: 3 osds: 3 up, 3 in
    rgw: 1 daemon active

  data:
    pools:   4 pools, 32 pgs
    objects: 187  objects, 1.1 KiB
    usage:   3.0 GiB used, 597 GiB / 600 GiB avail
    pgs:     32 active+clean

5. Block Storage Test

5.1. Ceph Node

# ceph osd pool create rbd 16
# rbd pool init rbd
# rbd create foo --size 4096 --image-feature layering
# rbd map foo --name client.admin
/dev/rbd0
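
To exercise the mapped block device, it can be formatted and mounted; the ext4 filesystem and the /mnt/rbd mount point below are assumptions for illustration:

# mkfs.ext4 /dev/rbd0
# mkdir -p /mnt/rbd
# mount /dev/rbd0 /mnt/rbd
# df -h /mnt/rbd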

6. File Storage Test

6.1. Ceph Node

# ceph osd pool create cephfs_data 16
# ceph osd pool create cephfs_metadata 16
# ceph fs new filesystem cephfs_metadata cephfs_data
# cat /home/cephdeploy/my-cluster/ceph.client.admin.keyring
[client.admin]
        key = AQAk1SxcbTz/IBAAHCPTQ5x1SHFcA0fn2tTW7w==
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"

# vim admin.secret
AQAk1SxcbTz/IBAAHCPTQ5x1SHFcA0fn2tTW7w==
# mkdir mnt
# mount -t ceph 10.0.0.10:6789:/ mnt/ -o name=admin,secretfile=admin.secret
# mount
...
10.0.0.10:6789:/ on /root/test/ceph/mnt type ceph (rw,relatime,name=admin,secret=<hidden>,acl,wsize=16777216)
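
A simple read/write check on the mounted CephFS (assumed, not part of the original test):

# echo "hello ceph" > mnt/test.txt
# cat mnt/test.txt
hello ceph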

7. Object Storage Test

7.1. Ceph Node

# curl 127.0.0.1:7480
<?xml version="1.0" encoding="UTF-8"?><ListAllMyBucketsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Owner><ID>anonymous</ID><DisplayName></DisplayName></Owner><Buckets></Buckets></ListAllMyBucketsResult>
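
The anonymous request above only confirms that RGW is answering on its default port 7480. To actually use the S3 API, a user with credentials has to be created first; the uid and display name below are arbitrary examples:

# radosgw-admin user create --uid=testuser --display-name="Test User"

The command prints the generated access_key and secret_key, which an S3 client such as s3cmd can then use against http://node1:7480.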

8. References