iSt0ne's Notes

Running CoreOS and Kubernetes on OpenStack (Multi-Node)

Kubernetes is a tool developed by Google to simplify the management of Docker Linux containers in cloud environments. Google officially launched the Kubernetes project at DockerCon. Since it was open-sourced in June it has drawn attention from major vendors; companies including Microsoft, RedHat, and Docker have joined the Kubernetes community, which makes the project all the more noteworthy. CoreOS is also working to make sure its distributed operating system works well with Kubernetes.

1. Deploying CoreOS on OpenStack

In the OpenStack default security group rules, allow access to ports 4001, 7001, and 10250 from any network.
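
The rules can be added with nova commands along these lines (a sketch; it assumes the instances use the default security group, and 0.0.0.0/0 can be narrowed to your own networks):

[root@node-9 ~]# nova secgroup-add-rule default tcp 4001 4001 0.0.0.0/0
[root@node-9 ~]# nova secgroup-add-rule default tcp 7001 7001 0.0.0.0/0
[root@node-9 ~]# nova secgroup-add-rule default tcp 10250 10250 0.0.0.0/0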

[root@node-9 ~]# wget https://github.com/kelseyhightower/kubernetes-coreos/releases/download/v0.0.5/kubernetes-coreos-0.0.5.tar.gz

[root@node-9 ~]# tar xzvf kubernetes-coreos-0.0.5.tar.gz
[root@node-9 ~]# cd kubernetes-coreos/configs/

Note: because access to storage.googleapis.com is unreliable, replace every storage.googleapis.com URL in master.yml, node1.yml, and node2.yml with your own web server (192.168.101.59).
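
One way to do the replacement in bulk (a sketch; it assumes the stock configs reference https://storage.googleapis.com and that http://192.168.101.59 serves the same file paths):

[root@node-9 configs]# sed -i 's|https://storage.googleapis.com|http://192.168.101.59|g' master.yml node1.yml node2.yml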

[root@node-9 configs]# cat master.yml

#cloud-config


# Set DNS to 8.8.4.4 and 8.8.8.8
write_files:
  - path: /etc/resolv.conf
    permissions: 0644
    content: |
      # Generated by NetworkManager
      domain openstacklocal
      search openstacklocal novalocal
      nameserver 8.8.4.4
      nameserver 8.8.8.8
hostname: master
coreos:
  etcd:
    name: master
    addr: 192.168.101.10:4001
    bind-addr: 0.0.0.0
    peer-addr: 192.168.101.10:7001
    peer-heartbeat-interval: 250
    peer-election-timeout: 1000
  units:
    - name: static.network
      command: start
      content: |
        [Match]
        Name=ens33

        [Network]
        Address=192.168.101.10/24
        DNS=8.8.8.8
        Gateway=192.168.101.1
    - name: cbr0.netdev
      command: start
      content: |
        [NetDev]
        Kind=bridge
        Name=cbr0
    - name: cbr0.network
      command: start
      content: |
        [Match]
        Name=cbr0

        [Network]
        Address=10.244.0.1/24

        [Route]
        Destination=10.0.0.0/8
        Gateway=0.0.0.0
    - name: cbr0-interface.network
      command: start
      content: |
        [Match]
        Name=ens34

        [Network]
        Bridge=cbr0
    - name: nat.service
      command: start
      content: |
        [Unit]
        Description=NAT non container traffic

        [Service]
        ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o ens33 -j MASQUERADE ! -d 10.0.0.0/8
        RemainAfterExit=yes
        Type=oneshot
    - name: etcd.service
      command: start
    - name: fleet.service
      command: start
    - name: docker.service
      command: start
      content: |
        [Unit]
        After=network.target
        Description=Docker Application Container Engine
        Documentation=http://docs.docker.io

        [Service]
        ExecStartPre=/bin/mount --make-rprivate /
        ExecStart=/usr/bin/docker -d -s=btrfs -H fd:// -b cbr0 --iptables=false

        [Install]
        WantedBy=multi-user.target
    - name: download-kubernetes.service
      command: start
      content: |
        [Unit]
        After=network-online.target
        Before=apiserver.service
        Before=controller-manager.service
        Before=kubelet.service
        Before=proxy.service
        Description=Download Kubernetes Binaries
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=network-online.target

        [Service]
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/apiserver
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/controller-manager
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/kubecfg
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/kubelet
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/proxy
        ExecStart=/usr/bin/chmod +x /opt/bin/apiserver
        ExecStart=/usr/bin/chmod +x /opt/bin/controller-manager
        ExecStart=/usr/bin/chmod +x /opt/bin/kubecfg
        ExecStart=/usr/bin/chmod +x /opt/bin/kubelet
        ExecStart=/usr/bin/chmod +x /opt/bin/proxy
        RemainAfterExit=yes
        Type=oneshot
    - name: apiserver.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/apiserver
        Description=Kubernetes API Server
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/apiserver \
        --address=127.0.0.1 \
        --port=8080 \
        --etcd_servers=http://127.0.0.1:4001 \
        --machines=192.168.101.10,192.168.101.11,192.168.101.12 \
        --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
    - name: controller-manager.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/controller-manager
        Description=Kubernetes Controller Manager
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/controller-manager \
        --master=127.0.0.1:8080 \
        --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
    - name: kubelet.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/kubelet
        Description=Kubernetes Kubelet
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/kubelet \
        --address=0.0.0.0 \
        --port=10250 \
        --hostname_override=192.168.101.10 \
        --etcd_servers=http://127.0.0.1:4001 \
        --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
    - name: proxy.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/proxy
        Description=Kubernetes Proxy
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/proxy --etcd_servers=http://127.0.0.1:4001 --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
  update:
    group: stable
    reboot-strategy: off
ssh_authorized_keys:
  - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2+jvss6ZlGD2In6kAjmiFaDbOM/yELOeDwBmYElwh3mFcIuldcnOdKfDQOaBeUWiHS1ImE6cC/VHg1uR2x/JmFJYmDooBJgswhv/MHNr62D3iJCtZ0mLkJHDIyXVFbwPFk94b1V2Mx48RgzPkC7Wy47OBc0vFgIAeGZVUjbPwUVyUTkIPA1p3M2L9V0cXNpMCBBRHL6rFWg2Cl/3nwvXD199mualpHL5N5/iINWdHGNCEzWIslI0lJoRhN/cNmlxiDCRWNgJGaPpNTnJHQkOUFHeONDNvg3tbrZmtDdWsH44cVOEb4qinXsSgnFNMxdsUUbYy3xP3H52rAPv/rVKCQ== root@2263cf8be602”

[root@node-9 configs]# cat node1.yml

#cloud-config

write_files:
  - path: /etc/resolv.conf
    permissions: 0644
    content: |
      # Generated by NetworkManager
      domain openstacklocal
      search openstacklocal novalocal
      nameserver 8.8.4.4
      nameserver 8.8.8.8
hostname: node1
coreos:
  etcd:
    name: node1
    addr: 192.168.101.11:4001
    bind-addr: 0.0.0.0
    peer-addr: 192.168.101.11:7001
    peers: 192.168.101.10:7001,192.168.101.11:7001
    peer-heartbeat-interval: 250
    peer-election-timeout: 1000
  units:
    - name: static.network
      command: start
      content: |
        [Match]
        Name=ens33

        [Network]
        Address=192.168.101.11/24
        DNS=8.8.8.8
        Gateway=192.168.101.1
    - name: cbr0.netdev
      command: start
      content: |
        [NetDev]
        Kind=bridge
        Name=cbr0
    - name: cbr0.network
      command: start
      content: |
        [Match]
        Name=cbr0

        [Network]
        Address=10.244.1.1/24

        [Route]
        Destination=10.0.0.0/8
        Gateway=0.0.0.0
    - name: cbr0-interface.network
      command: start
      content: |
        [Match]
        Name=ens34

        [Network]
        Bridge=cbr0
    - name: nat.service
      command: start
      content: |
        [Unit]
        Description=NAT non container traffic

        [Service]
        ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o ens33 -j MASQUERADE ! -d 10.0.0.0/8
        RemainAfterExit=yes
        Type=oneshot
    - name: etcd.service
      command: start
    - name: fleet.service
      command: start
    - name: docker.service
      command: start
      content: |
        [Unit]
        After=network.target
        Description=Docker Application Container Engine
        Documentation=http://docs.docker.io

        [Service]
        ExecStartPre=/bin/mount --make-rprivate /
        ExecStart=/usr/bin/docker -d -s=btrfs -H fd:// -b cbr0 --iptables=false

        [Install]
        WantedBy=multi-user.target
    - name: download-kubernetes.service
      command: start
      content: |
        [Unit]
        After=network-online.target
        Before=kubelet.service
        Before=proxy.service
        Description=Download Kubernetes Binaries
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=network-online.target

        [Service]
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/kubelet
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/proxy
        ExecStart=/usr/bin/chmod +x /opt/bin/kubelet
        ExecStart=/usr/bin/chmod +x /opt/bin/proxy
        RemainAfterExit=yes
        Type=oneshot
    - name: kubelet.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/kubelet
        Description=Kubernetes Kubelet
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/kubelet \
        --address=0.0.0.0 \
        --port=10250 \
        --hostname_override=192.168.101.11 \
        --etcd_servers=http://127.0.0.1:4001 \
        --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
    - name: proxy.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/proxy
        Description=Kubernetes Proxy
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/proxy --etcd_servers=http://127.0.0.1:4001 --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
  update:
    group: stable
    reboot-strategy: off
ssh_authorized_keys:
  - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2+jvss6ZlGD2In6kAjmiFaDbOM/yELOeDwBmYElwh3mFcIuldcnOdKfDQOaBeUWiHS1ImE6cC/VHg1uR2x/JmFJYmDooBJgswhv/MHNr62D3iJCtZ0mLkJHDIyXVFbwPFk94b1V2Mx48RgzPkC7Wy47OBc0vFgIAeGZVUjbPwUVyUTkIPA1p3M2L9V0cXNpMCBBRHL6rFWg2Cl/3nwvXD199mualpHL5N5/iINWdHGNCEzWIslI0lJoRhN/cNmlxiDCRWNgJGaPpNTnJHQkOUFHeONDNvg3tbrZmtDdWsH44cVOEb4qinXsSgnFNMxdsUUbYy3xP3H52rAPv/rVKCQ== root@2263cf8be602”

[root@node-9 configs]# cat node2.yml

#cloud-config

write_files:
  - path: /etc/resolv.conf
    permissions: 0644
    content: |
      # Generated by NetworkManager
      domain openstacklocal
      search openstacklocal novalocal
      nameserver 8.8.4.4
      nameserver 8.8.8.8
hostname: node2
coreos:
  etcd:
    name: node2
    addr: 192.168.101.12:4001
    bind-addr: 0.0.0.0
    peer-addr: 192.168.101.12:7001
    peers: 192.168.101.10:7001,192.168.101.11:7001
    peer-heartbeat-interval: 250
    peer-election-timeout: 1000
  units:
    - name: static.network
      command: start
      content: |
        [Match]
        Name=ens33

        [Network]
        Address=192.168.101.12/24
        DNS=8.8.8.8
        Gateway=192.168.101.1
    - name: cbr0.netdev
      command: start
      content: |
        [NetDev]
        Kind=bridge
        Name=cbr0
    - name: cbr0.network
      command: start
      content: |
        [Match]
        Name=cbr0

        [Network]
        Address=10.244.2.1/24

        [Route]
        Destination=10.0.0.0/8
        Gateway=0.0.0.0
    - name: cbr0-interface.network
      command: start
      content: |
        [Match]
        Name=ens34

        [Network]
        Bridge=cbr0
    - name: nat.service
      command: start
      content: |
        [Unit]
        Description=NAT non container traffic

        [Service]
        ExecStart=/usr/sbin/iptables -t nat -A POSTROUTING -o ens33 -j MASQUERADE ! -d 10.0.0.0/8
        RemainAfterExit=yes
        Type=oneshot
    - name: etcd.service
      command: start
    - name: fleet.service
      command: start
    - name: docker.service
      command: start
      content: |
        [Unit]
        After=network.target
        Description=Docker Application Container Engine
        Documentation=http://docs.docker.io

        [Service]
        ExecStartPre=/bin/mount --make-rprivate /
        ExecStart=/usr/bin/docker -d -s=btrfs -H fd:// -b cbr0 --iptables=false

        [Install]
        WantedBy=multi-user.target
    - name: download-kubernetes.service
      command: start
      content: |
        [Unit]
        After=network-online.target
        Before=kubelet.service
        Before=proxy.service
        Description=Download Kubernetes Binaries
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Requires=network-online.target

        [Service]
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/kubelet
        ExecStart=/usr/bin/wget -N -P /opt/bin http://192.168.101.59/kubernetes/proxy
        ExecStart=/usr/bin/chmod +x /opt/bin/kubelet
        ExecStart=/usr/bin/chmod +x /opt/bin/proxy
        RemainAfterExit=yes
        Type=oneshot
    - name: kubelet.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/kubelet
        Description=Kubernetes Kubelet
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/kubelet \
        --address=0.0.0.0 \
        --port=10250 \
        --hostname_override=192.168.101.12 \
        --etcd_servers=http://127.0.0.1:4001 \
        --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
    - name: proxy.service
      command: start
      content: |
        [Unit]
        After=etcd.service
        After=download-kubernetes.service
        ConditionFileIsExecutable=/opt/bin/proxy
        Description=Kubernetes Proxy
        Documentation=https://github.com/GoogleCloudPlatform/kubernetes
        Wants=etcd.service
        Wants=download-kubernetes.service

        [Service]
        ExecStart=/opt/bin/proxy --etcd_servers=http://127.0.0.1:4001 --logtostderr=true
        Restart=always
        RestartSec=10

        [Install]
        WantedBy=multi-user.target
  update:
    group: stable
    reboot-strategy: off
ssh_authorized_keys:
  - "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA2+jvss6ZlGD2In6kAjmiFaDbOM/yELOeDwBmYElwh3mFcIuldcnOdKfDQOaBeUWiHS1ImE6cC/VHg1uR2x/JmFJYmDooBJgswhv/MHNr62D3iJCtZ0mLkJHDIyXVFbwPFk94b1V2Mx48RgzPkC7Wy47OBc0vFgIAeGZVUjbPwUVyUTkIPA1p3M2L9V0cXNpMCBBRHL6rFWg2Cl/3nwvXD199mualpHL5N5/iINWdHGNCEzWIslI0lJoRhN/cNmlxiDCRWNgJGaPpNTnJHQkOUFHeONDNvg3tbrZmtDdWsH44cVOEb4qinXsSgnFNMxdsUUbYy3xP3H52rAPv/rVKCQ== root@2263cf8be602”

Boot the CoreOS instances:

Boot the master:

nova boot \
--user-data ./master.yml \
--image c3c0b86d-b2a6-4129-9bbd-b27fa4f88e06 \
--key-name fuel \
--flavor m1.small \
--security-groups default \
--nic net-id=284d3f00-042b-41b2-bec4-f5bc7dcee037,v4-fixed-ip=192.168.101.10 \
kubernete_master

Boot node1:

nova boot \
--user-data ./node1.yml \
--image c3c0b86d-b2a6-4129-9bbd-b27fa4f88e06 \
--key-name fuel \
--flavor m1.small \
--security-groups default \
--nic net-id=284d3f00-042b-41b2-bec4-f5bc7dcee037,v4-fixed-ip=192.168.101.11 \
kubernete_node1

Boot node2:

nova boot \
--user-data ./node2.yml \
--image c3c0b86d-b2a6-4129-9bbd-b27fa4f88e06 \
--key-name fuel \
--flavor m1.small \
--security-groups default \
--nic net-id=284d3f00-042b-41b2-bec4-f5bc7dcee037,v4-fixed-ip=192.168.101.12 \
kubernete_node2

Associate floating IPs as follows (a sketch of the nova commands appears after this list):

master: 172.16.200.135
node1: 172.16.200.136
node2: 172.16.200.137
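
Assuming the floating IPs have already been allocated to the project, the association can be done with commands along these lines:

[root@node-9 ~]# nova floating-ip-associate kubernete_master 172.16.200.135
[root@node-9 ~]# nova floating-ip-associate kubernete_node1 172.16.200.136
[root@node-9 ~]# nova floating-ip-associate kubernete_node2 172.16.200.137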

The cloud-config files above inject node-9's SSH public key, so log in from that machine.

Log in to the master:

[root@node-9 ~]# ssh core@172.16.200.135
CoreOS (stable)
core@master ~ $ sudo su -
master ~ # netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 633/apiserver
tcp6 0 0 :::22 :::* LISTEN 1/systemd
tcp6 0 0 :::7001 :::* LISTEN 601/etcd
tcp6 0 0 :::4001 :::* LISTEN 601/etcd
tcp6 0 0 :::10250 :::* LISTEN 654/kubelet
udp 0 0 10.244.0.1:123 0.0.0.0:* 466/ntpd
udp 0 0 192.168.101.10:123 0.0.0.0:* 466/ntpd
udp 0 0 127.0.0.1:123 0.0.0.0:* 466/ntpd
udp 0 0 0.0.0.0:123 0.0.0.0:* 466/ntpd
udp6 0 0 fe80::f816:3eff:fe9:123 :::* 466/ntpd
udp6 0 0 ::1:123 :::* 466/ntpd
udp6 0 0 :::123 :::* 466/ntpd

Log in to node1:

[root@node-9 ~]# ssh core@172.16.200.136
CoreOS (stable)
core@node1 ~ $ sudo su -
node1 ~ # netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp6 0 0 :::22 :::* LISTEN 1/systemd
tcp6 0 0 :::7001 :::* LISTEN 603/etcd
tcp6 0 0 :::4001 :::* LISTEN 603/etcd
tcp6 0 0 :::10250 :::* LISTEN 624/kubelet
udp 0 0 10.244.1.1:123 0.0.0.0:* 466/ntpd
udp 0 0 192.168.101.11:123 0.0.0.0:* 466/ntpd
udp 0 0 127.0.0.1:123 0.0.0.0:* 466/ntpd
udp 0 0 0.0.0.0:123 0.0.0.0:* 466/ntpd
udp6 0 0 fe80::f816:3eff:fe0:123 :::* 466/ntpd
udp6 0 0 ::1:123 :::* 466/ntpd
udp6 0 0 :::123 :::* 466/ntpd

Log in to node2:

[root@node-9 ~]# ssh core@172.16.200.137
CoreOS (stable)
core@node2 ~ $ sudo su -
node2 ~ # netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp6 0 0 :::22 :::* LISTEN 1/systemd
tcp6 0 0 :::7001 :::* LISTEN 592/etcd
tcp6 0 0 :::4001 :::* LISTEN 592/etcd
tcp6 0 0 :::10250 :::* LISTEN 625/kubelet
udp 0 0 10.244.2.1:123 0.0.0.0:* 467/ntpd
udp 0 0 192.168.101.12:123 0.0.0.0:* 467/ntpd
udp 0 0 127.0.0.1:123 0.0.0.0:* 467/ntpd
udp 0 0 0.0.0.0:123 0.0.0.0:* 467/ntpd
udp6 0 0 fe80::f816:3eff:fed:123 :::* 467/ntpd
udp6 0 0 ::1:123 :::* 467/ntpd
udp6 0 0 :::123 :::* 467/ntpd
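
With all three instances up, a quick sanity check that the etcd/fleet cluster has actually formed (fleet.service is started by the cloud-configs above; three machines should be listed):

master ~ # fleetctl list-machines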


2. Deploying an Application on the CoreOS Cluster with Kubernetes

This example deploys a dynamic web application: the web frontend writes data to the redis master service and reads data from the redis slave services.

Use redis-master.json to deploy a redis master inside a container.

master ~ # vi redis-master.json

{
  "id": "redis-master-2",
  "kind": "Pod",
  "apiVersion": "v1beta1",
  "desiredState": {
    "manifest": {
      "version": "v1beta1",
      "id": "redis-master-2",
      "containers": [{
        "name": "master",
        "image": "dockerfile/redis",
        "ports": [{
          "containerPort": 6379,
          "hostPort": 6379
        }]
      }]
    }
  },
  "labels": {
    "name": "redis-master"
  }
}

Create the redis pod:

master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 -c redis-master.json create pods
I0906 07:36:44.481494 00775 request.go:287] Waiting for completion of /operations/1
Name Image(s) Host Labels
---------- ---------- ---------- ----------
redis-master-2 dockerfile/redis 192.168.101.12/ name=redis-master

List the pods in the cluster:

master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 list pods
Name Image(s) Host Labels
---------- ---------- ---------- ----------
redis-master-2 dockerfile/redis 192.168.101.12/ name=redis-master

A Kubernetes 'service' is a named load balancer that proxies traffic to one or more containers; other containers can discover these services through environment variables. The service load balancer finds its target containers via pod labels.

master ~ # vi redis-master-service.json

{
  "id": "redismaster",
  "kind": "Service",
  "apiVersion": "v1beta1",
  "port": 10000,
  "containerPort": 6379,
  "selector": {
    "name": "redis-master"
  }
}

The pod created above carries the label name=redis-master, and the service uses the selector field to route traffic to matching pods. Create the service from redis-master-service.json:

master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 -c redis-master-service.json create services
I0906 07:52:26.385924 00804 request.go:287] Waiting for completion of /operations/2
Name Labels Selector Port
---------- ---------- ---------- ----------
redismaster name=redis-master 10000

Kubernetes sets up a proxy on every node, so all pods can reach the redis master through local port 10000.
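
A quick way to verify that the proxy is forwarding (a sketch; it assumes the dockerfile/redis image, which ships redis-cli, is already present on the node; the command should print PONG if the master is reachable through the proxy):

master ~ # docker run --rm --entrypoint redis-cli dockerfile/redis -h 192.168.101.10 -p 10000 ping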

Create the redis slaves; the following configuration starts two redis slave instances.

master ~ # vi redis-slave-controller.json

{
  "id": "redisSlaveController",
  "kind": "ReplicationController",
  "apiVersion": "v1beta1",
  "desiredState": {
    "replicas": 2,
    "replicaSelector": {"name": "redisslave"},
    "podTemplate": {
      "desiredState": {
         "manifest": {
           "version": "v1beta1",
           "id": "redisSlaveController",
           "containers": [{
             "name": "slave",
             "image": "brendanburns/redis-slave",
             "ports": [{"containerPort": 6379, "hostPort": 6380}]
           }]
         }
       },
       "labels": {"name": "redisslave"}
      }},
  "labels": {"name": "redisslave"}
}
master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 -c redis-slave-controller.json create replicationControllers
I0906 07:54:53.675000 00815 request.go:287] Waiting for completion of /operations/3
Name Image(s) Selector Replicas
---------- ---------- ---------- ----------
redisSlaveController brendanburns/redis-slave name=redisslave 2

The redis slaves configure themselves by reading the Kubernetes service environment variables available inside the container; the slave is started with the following command:

redis-server --slaveof $SERVICE_HOST $REDISMASTER_SERVICE_PORT

Check that the redis master and slave pods are running:

master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 list pods
Name Image(s) Host Labels
---------- ---------- ---------- ----------
redis-master-2 dockerfile/redis 192.168.101.12/ name=redis-master
1659535d-359b-11e4-ba45-fa163e9cf1d0 brendanburns/redis-slave 192.168.101.12/ name=redisslave,replicationController=redisSlaveController
1659bc1c-359b-11e4-ba45-fa163e9cf1d0 brendanburns/redis-slave 192.168.101.11/ name=redisslave,replicationController=redisSlaveController

Create the redis slave service, which load-balances traffic across the two redis slaves.

master ~ # vi redis-slave-service.json

{
  "id": "redisslave",
  "kind": "Service",
  "apiVersion": "v1beta1",
  "port": 10001,
  "containerPort": 6379,
  "labels": {
    "name": "redisslave"
  },
  "selector": {
    "name": "redisslave"
  }
}
master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 -c redis-slave-service.json create services
I0906 07:58:20.048813 00831 request.go:287] Waiting for completion of /operations/6
Name Labels Selector Port
---------- ---------- ---------- ----------
redisslave name=redisslave name=redisslave 10001

Create the frontend pods:

master ~ # vi frontend-controller.json

{
  "id": "frontendController",
  "kind": "ReplicationController",
  "apiVersion": "v1beta1",
  "desiredState": {
    "replicas": 3,
    "replicaSelector": {"name": "frontend"},
    "podTemplate": {
      "desiredState": {
         "manifest": {
           "version": "v1beta1",
           "id": "frontendController",
           "containers": [{
             "name": "php-redis",
             "image": "brendanburns/php-redis",
             "ports": [{"containerPort": 80, "hostPort": 8000}]
           }]
         }
       },
       "labels": {"name": "frontend"}
      }},
  "labels": {"name": "frontend"}
}
master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 -c frontend-controller.json create replicationControllers
I0906 08:00:17.366527 00839 request.go:287] Waiting for completion of /operations/7
Name Image(s) Selector Replicas
---------- ---------- ---------- ----------
frontendController brendanburns/php-redis name=frontend 3
master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 list pods
Name Image(s) Host Labels
---------- ---------- ---------- ----------
redis-master-2 dockerfile/redis 192.168.101.12/ name=redis-master
1659535d-359b-11e4-ba45-fa163e9cf1d0 brendanburns/redis-slave 192.168.101.12/ name=redisslave,replicationController=redisSlaveController
1659bc1c-359b-11e4-ba45-fa163e9cf1d0 brendanburns/redis-slave 192.168.101.11/ name=redisslave,replicationController=redisSlaveController
d7530470-359b-11e4-ba45-fa163e9cf1d0 brendanburns/php-redis 192.168.101.10/ name=frontend,replicationController=frontendController
d753565b-359b-11e4-ba45-fa163e9cf1d0 brendanburns/php-redis 192.168.101.12/ name=frontend,replicationController=frontendController
d7536f15-359b-11e4-ba45-fa163e9cf1d0 brendanburns/php-redis 192.168.101.11/ name=frontend,replicationController=frontendController

One redis master, two redis slaves, and three web frontends are now running.
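
The two services can be double-checked the same way; a listing should show redismaster on port 10000 and redisslave on port 10001:

master ~ # /opt/bin/kubecfg -h http://127.0.0.1:8080 list services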

The frontend PHP code is as follows:

<?
set_include_path('.:/usr/share/php:/usr/share/pear:/vendor/predis');
error_reporting(E_ALL);
ini_set('display_errors', 1);
require 'predis/autoload.php';
if (isset($_GET['cmd']) === true) {
  header('Content-Type: application/json');
  if ($_GET['cmd'] == 'set') {
    $client = new Predis\Client([
      'scheme' => 'tcp',
      'host' => getenv('SERVICE_HOST'),
      'port' => getenv('REDISMASTER_SERVICE_PORT'),
    ]);
    $client->set($_GET['key'], $_GET['value']);
    print('{"message": "Updated"}');
  } else {
    $read_port = getenv('REDISMASTER_SERVICE_PORT');
    if (isset($_ENV['REDISSLAVE_SERVICE_PORT'])) {
      $read_port = getenv('REDISSLAVE_SERVICE_PORT');
    }
    $client = new Predis\Client([
      'scheme' => 'tcp',
      'host' => getenv('SERVICE_HOST'),
      'port' => $read_port,
    ]);
    $value = $client->get($_GET['key']);
    print('{"data": "' . $value . '"}');
  }
} else {
  phpinfo();
} ?>

Inspect the environment variables inside one of the containers:

master ~ # docker inspect f15c4e1b83ce

[{
    "Args": [
        "-c",
        "/run.sh"
    ],
    "Config": {
        "AttachStderr": false,
        "AttachStdin": false,
        "AttachStdout": false,
        "Cmd": [
            "/bin/sh",
            "-c",
            "/run.sh"
        ],
        "CpuShares": 0,
        "Cpuset": "",
        "Domainname": "",
        "Entrypoint": null,
        "Env": [
            "REDISMASTER_SERVICE_PORT=10000",
            "REDISMASTER_PORT=tcp://192.168.101.10:10000",
            "REDISMASTER_PORT_6379_TCP=tcp://192.168.101.10:10000",
            "REDISMASTER_PORT_6379_TCP_PROTO=tcp",
            "REDISMASTER_PORT_6379_TCP_PORT=10000",
            "REDISMASTER_PORT_6379_TCP_ADDR=192.168.101.10",
            "REDISSLAVE_SERVICE_PORT=10001",
            "REDISSLAVE_PORT=tcp://192.168.101.10:10001",
            "REDISSLAVE_PORT_6379_TCP=tcp://192.168.101.10:10001",
            "REDISSLAVE_PORT_6379_TCP_PROTO=tcp",
            "REDISSLAVE_PORT_6379_TCP_PORT=10001",
            "REDISSLAVE_PORT_6379_TCP_ADDR=192.168.101.10",
            "SERVICE_HOST=192.168.101.10",
            "HOME=/",
            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
        ],
        "ExposedPorts": {
            "80/tcp": {}
        },
        "Hostname": "net",
        "Image": "brendanburns/php-redis",
        "Memory": 0,
        "MemorySwap": 0,
        "NetworkDisabled": false,
        "OnBuild": null,
        "OpenStdin": false,
        "PortSpecs": null,
        "StdinOnce": false,
        "Tty": false,
        "User": "",
        "Volumes": null,
        "WorkingDir": ""
    },
    "Created": "2014-09-06T10:30:11.88433294Z",
    "Driver": "btrfs",
    "ExecDriver": "native-0.2",
    "HostConfig": {
        "Binds": [],
        "ContainerIDFile": "",
        "Dns": null,
        "DnsSearch": null,
        "Links": null,
        "LxcConf": null,
        "NetworkMode": "container:f43cdcaf0fa106fb7a32fcbda1d2a153eba95ccda740787cde6c7c6a69fe0058",
        "PortBindings": {
            "80/tcp": [
                {
                    "HostIp": "",
                    "HostPort": "8000"
                }
            ]
        },
        "Privileged": false,
        "PublishAllPorts": false,
        "VolumesFrom": null
    },
    "HostnamePath": "",
    "HostsPath": "/var/lib/docker/containers/f43cdcaf0fa106fb7a32fcbda1d2a153eba95ccda740787cde6c7c6a69fe0058/hosts",
    "Id": "f15c4e1b83ce1b17ef9816a4012c23a9ba866f3f4275780a00157cd17bdd1350",
    "Image": "5c640c2fc451f9b15289b38493c8c067faf1bfc89e9986472a78b3eaba25a460",
    "MountLabel": "",
    "Name": "/k8s--php_-_redis.424f700a--d7530470_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--4fdc74e4",
    "NetworkSettings": {
        "Bridge": "",
        "Gateway": "",
        "IPAddress": "",
        "IPPrefixLen": 0,
        "PortMapping": null,
        "Ports": null
    },
    "Path": "/bin/sh",
    "ProcessLabel": "",
    "ResolvConfPath": "/etc/resolv.conf",
    "State": {
        "ExitCode": 0,
        "FinishedAt": "0001-01-01T00:00:00Z",
        "Paused": false,
        "Pid": 1233,
        "Running": true,
        "StartedAt": "2014-09-06T10:30:12.461888562Z"
    },
    "Volumes": {},
    "VolumesRW": {}
}
]
master ~ # netstat -tunlp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 127.0.0.1:8080 0.0.0.0:* LISTEN 633/apiserver
tcp6 0 0 :::10000 :::* LISTEN 635/proxy
tcp6 0 0 :::10001 :::* LISTEN 635/proxy
tcp6 0 0 :::22 :::* LISTEN 1/systemd
tcp6 0 0 :::7001 :::* LISTEN 601/etcd
tcp6 0 0 :::8000 :::* LISTEN 648/docker
tcp6 0 0 :::4001 :::* LISTEN 601/etcd
tcp6 0 0 :::10250 :::* LISTEN 654/kubelet
udp 0 0 10.244.0.1:123 0.0.0.0:* 466/ntpd
udp 0 0 192.168.101.10:123 0.0.0.0:* 466/ntpd
udp 0 0 127.0.0.1:123 0.0.0.0:* 466/ntpd
udp 0 0 0.0.0.0:123 0.0.0.0:* 466/ntpd
udp6 0 0 fe80::9cd7:59ff:fe8:123 :::* 466/ntpd
udp6 0 0 fe80::d0d3:fff:fe4c:123 :::* 466/ntpd
udp6 0 0 fe80::f816:3eff:fe9:123 :::* 466/ntpd
udp6 0 0 ::1:123 :::* 466/ntpd
udp6 0 0 :::123 :::* 466/ntpd

Note: depending on how quickly you can reach the Docker registry, pulling the images may take some time.

3. Checking Docker Status on Each Node

master ~ # docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
dockerfile/redis latest e155911e7a3b 15 hours ago 429.1 MB
kubernetes/pause latest 6c4579af347b 7 weeks ago 239.8 kB
brendanburns/php-redis latest 5c640c2fc451 12 weeks ago 376.9 MB
master ~ # docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f15c4e1b83ce brendanburns/php-redis:latest /bin/sh -c /run.sh 11 hours ago Up 11 hours k8s--php_-_redis.424f700a--d7530470_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--4fdc74e4
f43cdcaf0fa1 kubernetes/pause:latest /pause 14 hours ago Up 14 hours 0.0.0.0:8000->80/tcp k8s--net.92b46b23--d7530470_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--c02ef6f4
node1 ~ # docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
kubernetes/pause latest 6c4579af347b 7 weeks ago 239.8 kB
brendanburns/php-redis latest 5c640c2fc451 12 weeks ago 376.9 MB
brendanburns/redis-slave latest 6925d4e3f2ad 3 months ago 596.8 MB
node1 ~ # docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
b63ee28bf7dc brendanburns/php-redis:latest /bin/sh -c /run.sh 8 hours ago Up 8 hours k8s--php_-_redis.4f2b7011--d7536f15_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--3c5cf7ed
c5ba9f44e69f kubernetes/pause:latest /pause 11 hours ago Up 11 hours 0.0.0.0:8000->80/tcp k8s--net.92b46b23--d7536f15_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--26583f5e
2aaa03721f41 brendanburns/redis-slave:latest /bin/sh -c /run.sh 11 hours ago Up 11 hours k8s--slave.e468f6dd--1659bc1c_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--10fba79a
486cbc3d1602 kubernetes/pause:latest /pause 14 hours ago Up 14 hours 0.0.0.0:6380->6379/tcp k8s--net.4c806b9d--1659bc1c_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--55ce1afc
node2 ~ # docker images
REPOSITORY TAG IMAGE ID CREATED VIRTUAL SIZE
dockerfile/redis latest e155911e7a3b 15 hours ago 429.1 MB
kubernetes/pause latest 6c4579af347b 7 weeks ago 239.8 kB
brendanburns/php-redis latest 5c640c2fc451 12 weeks ago 376.9 MB
brendanburns/redis-slave latest 6925d4e3f2ad 3 months ago 596.8 MB
node2 ~ # docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
02a6d2271e13 brendanburns/redis-slave:latest /bin/sh -c /run.sh 7 hours ago Up 7 hours k8s--slave.e89df6e1--1659535d_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--28968c03
9323b4963a85 brendanburns/php-redis:latest /bin/sh -c /run.sh 11 hours ago Up 11 hours k8s--php_-_redis.5c077018--d753565b_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--974ead68
3ed6a29ccc24 kubernetes/pause:latest /pause 13 hours ago Up 13 hours 0.0.0.0:8000->80/tcp k8s--net.92b46b23--d753565b_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--8f721955
1bff55962052 kubernetes/pause:latest /pause 13 hours ago Up 12 hours 0.0.0.0:6380->6379/tcp k8s--net.4c806b9d--1659535d_-_359b_-_11e4_-_ba45_-_fa163e9cf1d0.etcd--9207abd5
3ad0a32210f8 dockerfile/redis:latest redis-server /etc/re 13 hours ago Up 13 hours k8s--master.8e627940--redis_-_master_-_2.etcd--af79cd93
5a92957c095a kubernetes/pause:latest /pause 14 hours ago Up 14 hours 0.0.0.0:6379->6379/tcp k8s--net.51c76ba5--redis_-_master_-_2.etcd--9f9b0502

4. Access Test

master ~ # curl 'http://127.0.0.1:8000'

<html ng-app="redis">
  <head>
    <title>Guestbook</title>
    <link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.1.1/css/bootstrap.min.css">
    <script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.12/angular.min.js"></script>
    <script src="/controllers.js"></script>
    <script src="ui-bootstrap-tpls-0.10.0.min.js"></script>
  </head>
  <body ng-controller="RedisCtrl">
    <div style="width: 50%; margin-left: 20px">
      <h2>Guestbook</h2>
    <form>
    <fieldset>
    <input ng-model="msg" placeholder="Messages" class="form-control" type="text" name="input"><br>
    <button type="button" class="btn btn-primary" ng-click="controller.onRedis()">Submit</button>
    </fieldset>
    </form>
    <div>
      <div ng-repeat="msg in messages">
        {{msg}}
      </div>
    </div>
    </div>
  </body>
</html>
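
The PHP frontend shown earlier also exposes a simple set/get API through the cmd query parameter; a sketch of exercising it (the index.php path is an assumption, so check the frontend image for the actual script name):

master ~ # curl 'http://127.0.0.1:8000/index.php?cmd=set&key=hello&value=world'   # the frontend should answer {"message": "Updated"}
master ~ # curl 'http://127.0.0.1:8000/index.php?cmd=get&key=hello'               # and then {"data": "world"}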