
Setting up Open vSwitch and Kubernetes on Ubuntu 16.04

2017-10-26 14:02

Replace the apt sources with the Aliyun mirror

1. Back up the original file

sudo cp /etc/apt/sources.list  /etc/apt/sources.list.bak

2. Edit the sources list

sudo vim /etc/apt/sources.list

3. Remove the existing entries and add the following:

deb http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ xenial main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-security main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-proposed main restricted universe multiverse
deb-src http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse

4. Update:
apt-get update
apt-get upgrade

Install Docker (the latest version)

https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#xenial-1604-and-newer

apt-get update

apt-get install \
    linux-image-extra-$(uname -r) \
    linux-image-extra-virtual

Install packages to allow apt to use a repository over HTTPS:

$ sudo apt-get install \
    apt-transport-https \
    ca-certificates \
    curl \
    software-properties-common

Add Docker’s official GPG key:

$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -

Before running it, apt-key list shows:

pub   1024D/437D05B5 2004-09-12
uid                  Ubuntu Archive Automatic Signing Key <ftpmaster@ubuntu.com>
sub   2048g/79164387 2004-09-12

pub   4096R/C0B21F32 2012-05-11
uid                  Ubuntu Archive Automatic Signing Key (2012) <ftpmaster@ubuntu.com>

pub   4096R/EFE21092 2012-05-11
uid                  Ubuntu CD Image Automatic Signing Key (2012) <cdimage@ubuntu.com>

pub   1024D/FBB75451 2004-12-30
uid                  Ubuntu CD Image Automatic Signing Key <cdimage@ubuntu.com>


After running it (note the added Docker key), apt-key list shows:

pub   1024D/437D05B5 2004-09-12
uid                  Ubuntu Archive Automatic Signing Key <ftpmaster@ubuntu.com>
sub   2048g/79164387 2004-09-12

pub   4096R/C0B21F32 2012-05-11
uid                  Ubuntu Archive Automatic Signing Key (2012) <ftpmaster@ubuntu.com>

pub   4096R/EFE21092 2012-05-11
uid                  Ubuntu CD Image Automatic Signing Key (2012) <cdimage@ubuntu.com>

pub   1024D/FBB75451 2004-12-30
uid                  Ubuntu CD Image Automatic Signing Key <cdimage@ubuntu.com>

pub   4096R/0EBFCD88 2017-02-22
uid                  Docker Release (CE deb) <docker@docker.com>
sub   4096R/F273FCD8 2017-02-22

root@million1:~/.ssh# sudo apt-key fingerprint 0EBFCD88
pub   4096R/0EBFCD88 2017-02-22
      Key fingerprint = 9DC8 5822 9FC7 DD38 854A  E2D8 8D81 803C 0EBF CD88
uid                  Docker Release (CE deb) <docker@docker.com>
sub   4096R/F273FCD8 2017-02-22

Use the following command to set up the stable repository. You always need the stable repository, even if you want to install builds from the edge or test repositories as well. To add the edge or test repository, add the word edge or test (or both) after the word stable in the commands below:

add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   $(lsb_release -cs) \
   stable" #edge,test,stable

apt-get update

apt-cache madison docker-ce

root@million1:~/.ssh# apt-cache madison docker-ce
 docker-ce | 17.10.0~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.09.0~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.09.0~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.07.0~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.06.2~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.06.2~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.06.1~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.06.1~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.06.0~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.06.0~ce-0~ubuntu | https://download.docker.com/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.05.0~ce-0~ubuntu-xenial | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.04.0~ce-0~ubuntu-xenial | https://download.docker.com/linux/ubuntu xenial/edge amd64 Packages
 docker-ce | 17.03.2~ce-0~ubuntu-xenial | https://download.docker.com/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.03.1~ce-0~ubuntu-xenial | https://download.docker.com/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.03.0~ce-0~ubuntu-xenial | https://download.docker.com/linux/ubuntu xenial/stable amd64 Packages

apt-get install docker-ce=17.10.0~ce-0~ubuntu

Docker is now installed.

Possible problems:

1. Forgetting to change stable to edge above, so that apt cannot find 17.10.0~ce-0~ubuntu; 17.10 is only published in the edge channel, so the repository must be set to edge.

Install Open vSwitch and create a GRE tunnel

sudo apt-get install openvswitch-switch bridge-utils

Create the bridge on both hosts, then add a GRE port pointing at the other host's IP. On million1 (192.168.56.110):
ovs-vsctl add-br obr0
ovs-vsctl add-port obr0 gre0 -- set Interface gre0 type=gre options:remote_ip=192.168.56.109

On million2 (192.168.56.109):
ovs-vsctl add-br obr0
ovs-vsctl add-port obr0 gre0 -- set Interface gre0 type=gre options:remote_ip=192.168.56.110
brctl addbr kbr0

brctl addif kbr0 obr0

ip link set dev docker0 down

ip link del dev docker0

root@million1:~# brctl show

bridge name     bridge id               STP enabled     interfaces

docker0         8000.0242f2b8c699       no

kbr0            8000.d6082b73864b       no              obr0

root@million1:~# ovs-vsctl show

063469fa-2396-4763-a681-581a8f8fc71a

    Bridge "obr0"

        Port "gre0"

            Interface "gre0"

                type: gre

                options: {remote_ip="192.168.56.109"}

        Port "obr0"

            Interface "obr0"

                type: internal

    ovs_version: "2.5.2"

million1:
# vi /etc/network/interfaces

auto kbr0
iface kbr0 inet static
        address 172.17.1.1
        netmask 255.255.255.0
        gateway 172.17.1.0
        dns-nameservers 172.31.1.1

million2:

# vi /etc/network/interfaces

auto kbr0
iface kbr0 inet static
        address 172.17.2.1
        netmask 255.255.255.0
        gateway 172.17.2.0
        dns-nameservers 172.31.1.1
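
On both hosts, bring the new interface up (or simply reboot) so kbr0 gets its address:
ifup kbr0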

Each host then needs a static route that sends traffic for the other host's pod subnet to the peer's address (compare the route tables below):
million1:
ip route add 172.17.2.0/24 via 192.168.56.109 dev enp0s8
million2:
ip route add 172.17.1.0/24 via 192.168.56.110 dev enp0s8

root@million1:~# ping 172.17.2.1

PING 172.17.2.1 (172.17.2.1) 56(84) bytes of data.

64 bytes from 172.17.2.1: icmp_seq=261 ttl=64 time=1.31 ms

64 bytes from 172.17.2.1: icmp_seq=262 ttl=64 time=0.354 ms

root@million2:~# ping 172.17.1.1

PING 172.17.1.1 (172.17.1.1) 56(84) bytes of data.

64 bytes from 172.17.1.1: icmp_seq=1 ttl=64 time=0.313 ms

64 bytes from 172.17.1.1: icmp_seq=2 ttl=64 time=0.707 ms

root@million1:~# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.2.2        0.0.0.0         UG    0      0        0 enp0s3
10.0.2.0        0.0.0.0         255.255.255.0   U     0      0        0 enp0s3
169.254.0.0     0.0.0.0         255.255.0.0     U     1000   0        0 enp0s8
172.17.1.0      0.0.0.0         255.255.255.0   U     0      0        0 kbr0
172.17.2.0      192.168.56.109  255.255.255.0   UG    0      0        0 enp0s8
192.168.56.0    0.0.0.0         255.255.255.0   U     0      0        0 enp0s8

root@million2:~# route -n
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         10.0.2.2        0.0.0.0         UG    0      0        0 enp0s3
10.0.2.0        0.0.0.0         255.255.255.0   U     0      0        0 enp0s3
169.254.0.0     0.0.0.0         255.255.0.0     U     1000   0        0 enp0s8
172.17.0.0      0.0.0.0         255.255.0.0     U     0      0        0 docker0
172.17.1.0      192.168.56.110  255.255.255.0   UG    0      0        0 enp0s8
172.17.2.0      0.0.0.0         255.255.255.0   U     0      0        0 kbr0
192.168.56.0    0.0.0.0         255.255.255.0   U     0      0        0 enp0s8

After a reboot, however, I found the bridge and the routes were gone, so I wrote a startup script that re-creates them on every boot:
root@million2:~# cat ./kbr0_setup.sh

#!/bin/sh
### BEGIN INIT INFO
# Provides:          kbr0_setup.sh
### END INIT INFO
brctl addbr kbr0
brctl addif kbr0 obr0
ip route add 172.17.1.0/24 via 192.168.56.110 dev enp0s8

root@million1:~# cat ./kbr0_setup.sh

#!/bin/sh
### BEGIN INIT INFO
# Provides:          kbr0_setup.sh
### END INIT INFO
brctl addbr kbr0
brctl addif kbr0 obr0
ip route add 172.17.2.0/24 via 192.168.56.109 dev enp0s8

The GRE tunnel is now complete.
Several ways to register a startup script:
1. /etc/profile
2. /etc/bash.bashrc
3. /etc/rc.local
4. /etc/init.d
cp /root/kbr0_setup.sh /etc/init.d/ && chmod a+x /etc/init.d/kbr0_setup.sh && update-rc.d kbr0_setup.sh defaults 100
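
Since Ubuntu 16.04 is systemd-based, a oneshot unit is another option. A minimal sketch (the unit name and the After= ordering are my own choices, not from the setup above):

# /etc/systemd/system/kbr0-setup.service
[Unit]
Description=Recreate kbr0 bridge and pod routes
After=network.target openvswitch-switch.service

[Service]
Type=oneshot
# runs the same script we installed above
ExecStart=/root/kbr0_setup.sh
RemainAfterExit=yes

[Install]
WantedBy=multi-user.target

Enable it with: systemctl enable kbr0-setup.service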
Change Docker's default bridge docker0:
vi /lib/systemd/system/docker.service

ExecStart=/usr/bin/dockerd -H fd:// -b kbr0  --insecure-registry 192.168.56.106:5000

Then restart Docker.
Note: --insecure-registry 192.168.56.106:5000 lets us access our own private registry over plain HTTP.
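
Since we edited the unit file, systemd needs a reload before the restart picks up the new ExecStart:
systemctl daemon-reload
systemctl restart docker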

Of course, we can also generate a certificate and serve the private registry over HTTPS. The steps are roughly as follows.

On the registry host:
vi /etc/ssl/openssl.cnf    # under [ v3_ca ] add:
subjectAltName=IP:192.168.56.106

openssl req -newkey rsa:2048 -nodes -sha256 -keyout /root/certs/domain.key -x509 -days 365 -out /root/certs/domain.crt

cat domain.crt >> /etc/ssl/certs/ca-certificates.crt

Restart Docker and restart the registry.

On each machine that uses the registry:
mkdir -p /etc/docker/certs.d/192.168.56.106:5000/
scp root@192.168.56.106:/root/certs/domain.crt /etc/docker/certs.d/192.168.56.106:5000/ca.crt
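
If the registry itself runs as the official registry:2 container, the same certificate can be handed to it through its documented environment variables. A sketch, assuming the key and cert generated above live in /root/certs on the registry host:

docker run -d -p 5000:5000 --restart=always --name registry \
  -v /root/certs:/certs \
  -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
  -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
  registry:2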

root@million2:~# docker pull 192.168.56.106:5000/nginx:1.0

1.0: Pulling from nginx

fa937339c182: Pull complete

8f7e34fad634: Pull complete

03af90059de1: Pull complete

Digest: sha256:cc3085f62ae8f64234598b98aaf15fc0eb88533a10b794625e656de352c890b4

Status: Downloaded newer image for 192.168.56.106:5000/nginx:1.0

docker run -it -v /sbin/ifconfig:/sbin/ifconfig 192.168.56.106:5000/nginx:1.0 /bin/bash

 /sbin/ifconfig eth0

eth0      Link encap:Ethernet  HWaddr 02:42:ac:11:02:02

          inet addr:172.17.2.2  Bcast:0.0.0.0  Mask:255.255.255.0

          UP BROADCAST RUNNING MULTICAST  MTU:1500  Metric:1

          RX packets:20 errors:0 dropped:0 overruns:0 frame:0

          TX packets:0 errors:0 dropped:0 overruns:0 carrier:0

          collisions:0 txqueuelen:0

          RX bytes:2575 (2.5 KB)  TX bytes:0 (0.0 B)

This also proves that our bridge is working.

I'll continue with the Kubernetes installation when I have time.

Installing etcd and Kubernetes

To install the latest Kubernetes, and to build it from the GitHub source, we need a Go environment.
I used an online proxy to download the Go 1.9.2 tarball go1.9.2.linux-amd64.tar.gz.
Online proxy: https://k3.ol.youyouby.xyz/

Then share a folder C:\\share on Windows 10: add a new user (Settings -> Accounts -> Family & other users -> Add someone else to this PC -> I don't have this person's sign-in information -> Add a user without a Microsoft account -> fill in the user details), then grant that user access to the shared folder (right-click the folder -> Properties -> Sharing -> Share... -> add the user created above).

On Ubuntu, create the mount point and mount the share:
mkdir -p /winhost
mount -t cifs -o username=bill,password=bill //192.168.0.101/share /winhost

Or do it from the desktop: Files -> Home -> Connect to Server -> enter smb://192.168.0.101/ -> Connect.

Install the Go environment:
cp /winhost/go1.9.2.linux-amd64.tar.gz /root/kubernetes/
tar -xf go1.9.2.linux-amd64.tar.gz
vi /root/.bashrc   # add the following environment variables

export GOROOT=/root/kubernetes/go/
export GOARCH=amd64
export GOOS=linux
export GOBIN=$GOROOT/bin/
export GOTOOLS=$GOROOT/pkg/tool/
export PATH=$PATH:$GOBIN:$GOTOOLS
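
Reload the shell configuration so the variables take effect, then verify:
source /root/.bashrc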

root@master:/app/work/pod# go version

go version go1.9.2 linux/amd64

Download the Kubernetes server and client tarballs (again via the online proxy https://k3.ol.youyouby.xyz/: find the desired release on GitHub, copy the link, and download it through the proxy):
kubernetes-client-linux-amd64.tar.gz  kubernetes-server-linux-amd64.tar.gz

root@master:/app/kubernetes# ls

etcd-v3.2.9-linux-amd64  etcd-v3.2.9-linux-amd64.tar.gz  go  go1.9.2.linux-amd64.tar.gz  kubernetes  kubernetes-server-linux-amd64.tar.gz

Install etcd:
root@master:/app/kubernetes# tar -xf etcd-v3.2.9-linux-amd64.tar.gz

etcd-v3.2.9-linux-amd64

cp /app/kubernetes/etcd-v3.2.9-linux-amd64/etcd /usr/bin
cp /app/kubernetes/etcd-v3.2.9-linux-amd64/etcdctl /usr/bin
mkdir -p /etc/etcd
root@master:/app/kubernetes# vi /etc/etcd/etcd.conf

ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/"
ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.56.111:2379"

root@master:/app/kubernetes# mkdir /var/lib/etcd

root@master:/app/kubernetes# vi /lib/systemd/system/etcd.service

[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target

[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
EnvironmentFile=-/etc/etcd/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd --name=\"${ETCD_NAME}\" --data-dir=\"${ETCD_DATA_DIR}\" --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\""
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Start etcd:
systemctl start etcd
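
To have etcd start on boot as well:
systemctl enable etcd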

root@master:/app/kubernetes# etcdctl ls

/registry

Or test with curl:
root@master:/app/kubernetes# curl localhost:2379/v2/keys/registry

{"action":"get","node":{"key":"/registry","dir":true,"nodes":[{"key":"/registry/namespaces","dir":true,"modifiedIndex":6,"createdIndex":6},{"key":"/registry/minions","dir":true,"modifiedIndex":16,"createdIndex":16},{"key":"/registry/deployments","dir":true,"modifiedIndex":64201,"createdIndex":64201},{"key":"/registry/pods","dir":true,"modifiedIndex":64203,"createdIndex":64203},{"key":"/registry/replicasets","dir":true,"modifiedIndex":64202,"createdIndex":64202},{"key":"/registry/ranges","dir":true,"modifiedIndex":4,"createdIndex":4},{"key":"/registry/services","dir":true,"modifiedIndex":8,"createdIndex":8},{"key":"/registry/events","dir":true,"modifiedIndex":17,"createdIndex":17},{"key":"/registry/serviceaccounts","dir":true,"modifiedIndex":293,"createdIndex":293}],"modifiedIndex":4,"createdIndex":4}}

Now install Kubernetes:
root@master:/app/kubernetes# tar -xf kubernetes-server-linux-amd64.tar.gz
root@master:/app/kubernetes/kubernetes# ls

addons  client  kubernetes-src.tar.gz  LICENSES  server

root@master:/app/kubernetes/kubernetes# cd server/bin/

root@master:/app/kubernetes/kubernetes/server/bin# ls

cloud-controller-manager  kube-aggregator             kube-apiserver             kube-controller-manager             kubectl  kube-proxy             kube-scheduler

hyperkube                 kube-aggregator.docker_tag  kube-apiserver.docker_tag  kube-controller-manager.docker_tag  kubefed  kube-proxy.docker_tag  kube-scheduler.docker_tag

kubeadm                   kube-aggregator.tar         kube-apiserver.tar         kube-controller-manager.tar         kubelet  kube-proxy.tar         kube-scheduler.tar

Copy kube-apiserver, kube-controller-manager, kube-scheduler, and kubectl to /usr/bin on the master, and copy kubelet and kube-proxy to /usr/bin on each node.

root@master:/app# mkdir /etc/kubernetes/

root@master:/etc/kubernetes# vi apiserver  config  controller-manager  scheduler

root@master:/etc/kubernetes# cat apiserver| grep -v '^#'|egrep -v '^$'

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.56.111:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_STORAGE_BACKEND="--storage-backend etcd2"
KUBE_API_ARGS=""

root@master:/etc/kubernetes# cat config| grep -v '^#'|egrep -v '^$'

KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.56.111:8080"

root@master:/etc/kubernetes# cat controller-manager| grep -v '^#'|egrep -v '^$'

KUBE_CONTROLLER_MANAGER_ARGS=""

root@master:/etc/kubernetes# cat scheduler| grep -v '^#'|egrep -v '^$'

KUBE_SCHEDULER_ARGS=""


root@master:/etc/kubernetes# vi /lib/systemd/system/kube-apiserver.service

[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target
After=etcd.service
Wants=etcd.service

[Service]
# the unit fails to start if User is not defined
User=root
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/apiserver
#ExecStart=/bin/echo "/usr/bin/kube-apiserver $KUBE_LOGTOSTDERR $KUBE_LOG_LEVEL $KUBE_ETCD_SERVERS $KUBE_API_ADDRESS $KUBE_API_PORT  $KUBELET_PORT $KUBE_ALLOW_PRIV $KUBE_SERVICE_ADDRESSES $KUBE_ADMISSION_CONTROL $KUBE_API_ARGS $KUBE_STORAGE_BACKEND"
ExecStart=/usr/bin/kube-apiserver \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_ETCD_SERVERS \
            $KUBE_API_ADDRESS \
            $KUBE_API_PORT \
            $KUBELET_PORT \
            $KUBE_ALLOW_PRIV \
            $KUBE_SERVICE_ADDRESSES \
            $KUBE_ADMISSION_CONTROL \
            $KUBE_API_ARGS \
            $KUBE_STORAGE_BACKEND
Restart=on-failure
Type=notify
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

root@master:/etc/kubernetes# vi /lib/systemd/system/kube-controller-manager.service

[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/controller-manager
User=root
ExecStart=/usr/bin/kube-controller-manager \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_CONTROLLER_MANAGER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

root@master:/etc/kubernetes# vi /lib/systemd/system/kube-scheduler.service

[Unit]
Description=Kubernetes Scheduler Plugin
Documentation=https://github.com/GoogleCloudPlatform/kubernetes

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/scheduler
User=root
ExecStart=/usr/bin/kube-scheduler \
            $KUBE_LOGTOSTDERR \
            $KUBE_LOG_LEVEL \
            $KUBE_MASTER \
            $KUBE_SCHEDULER_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

systemctl daemon-reload
systemctl start  kube-apiserver.service kube-controller-manager.service kube-scheduler.service
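
A quick sanity check that all three components registered with the apiserver (run on the master; assumes kubectl was copied to /usr/bin there):
kubectl -s http://192.168.56.111:8080 get componentstatuses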

On the million nodes:
scp root@192.168.56.111:/etc/kubernetes/config /etc/kubernetes/config
root@million1:/etc/kubernetes# vi kubelet

KUBELET_ADDRESS="--address=127.0.0.1"
KUBELET_HOSTNAME="--hostname-override=192.168.56.110"
KUBELET_API_SERVER="--api-servers=http://192.168.56.111:8080"
# pod infrastructure container
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS="--enable-server=true --enable-debugging-handlers=true"

root@million1:/etc/kubernetes# vi proxy

# kubernetes proxy config
# default config should be adequate
# Add your own!
KUBE_PROXY_ARGS=""

For reference, /etc/kubernetes/config (copied from the master above) contains:

###
# kubernetes system config
#
# The following values are used to configure various aspects of all
# kubernetes services, including
#
#   kube-apiserver.service
#   kube-controller-manager.service
#   kube-scheduler.service
#   kubelet.service
#   kube-proxy.service

# logging to stderr means we get it in the systemd journal
KUBE_LOGTOSTDERR="--logtostderr=true"

# journal message level, 0 is debug
KUBE_LOG_LEVEL="--v=0"

# Should this cluster be allowed to run privileged docker containers
KUBE_ALLOW_PRIV="--allow-privileged=false"

# How the controller-manager, scheduler, and proxy find the apiserver
KUBE_MASTER="--master=http://192.168.56.111:8080"

root@million1:/etc/kubernetes# vi /lib/systemd/system/kubelet.service

[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service

[Service]
WorkingDirectory=/var/lib/kubelet
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/kubelet
ExecStart=/usr/bin/kubelet \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBELET_API_SERVER \
        $KUBELET_ADDRESS \
        $KUBELET_PORT \
        $KUBELET_HOSTNAME \
        $KUBE_ALLOW_PRIV \
        $KUBELET_POD_INFRA_CONTAINER \
        $KUBELET_ARGS
Restart=on-failure
KillMode=process

[Install]
WantedBy=multi-user.target

root@million1:/etc/kubernetes# vi /lib/systemd/system/kube-proxy.service

[Unit]
Description=Kubernetes Proxy
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=network.target

[Service]
EnvironmentFile=-/etc/kubernetes/config
EnvironmentFile=-/etc/kubernetes/proxy
ExecStart=/usr/bin/kube-proxy \
        $KUBE_LOGTOSTDERR \
        $KUBE_LOG_LEVEL \
        $KUBE_MASTER \
        $KUBE_PROXY_ARGS
Restart=on-failure
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target

Start kubelet and kube-proxy:
root@million1:/etc/kubernetes# systemctl start kubelet kube-proxy

root@million1:/etc/kubernetes#  systemctl status kubelet kube-proxy

● kubelet.service - Kubernetes Kubelet
   Loaded: loaded (/lib/systemd/system/kubelet.service; disabled; vendor preset: enabled)
   Active: active (running) since Tue 2017-10-31 14:29:04 CST; 12h ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 10033 (kubelet)
    Tasks: 13
   Memory: 66.9M
      CPU: 13min 11.686s
   CGroup: /system.slice/kubelet.service
           ├─10033 /usr/bin/kubelet --logtostderr=true --v=0 --api-servers=http://192.168.56.111:8080 --address=127.0.0.1 --hostname-override=192.168.56.110 --allow-privileged=false --pod-i
           └─10118 journalctl -k -f

Nov 01 00:23:02 million1 kubelet[10033]: W1101 00:23:02.714816   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-n59qd through plugin: invalid
Nov 01 00:23:03 million1 kubelet[10033]: W1101 00:23:03.725572   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-n59qd through plugin: invalid
Nov 01 00:23:08 million1 kubelet[10033]: W1101 00:23:08.699696   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-h02s8 through plugin: invalid
Nov 01 00:23:08 million1 kubelet[10033]: W1101 00:23:08.711487   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-4232910910-wxptx through plugin: invalid
Nov 01 00:32:45 million1 kubelet[10033]: W1101 00:32:45.378239   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-n59qd through plugin: invalid
Nov 01 00:32:45 million1 kubelet[10033]: W1101 00:32:45.864820   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-n59qd through plugin: invalid
Nov 01 00:32:45 million1 kubelet[10033]: W1101 00:32:45.869355   10033 pod_container_deletor.go:77] Container "6a913d50a0d48588e62e82a42b369bda3a00080ba4d8fa0beab9364c6271db42" not found i
Nov 01 00:32:45 million1 kubelet[10033]: W1101 00:32:45.870953   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-n59qd through plugin: invalid
Nov 01 00:32:46 million1 kubelet[10033]: W1101 00:32:46.885295   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-n59qd through plugin: invalid
Nov 01 00:33:08 million1 kubelet[10033]: W1101 00:33:08.857615   10033 docker_sandbox.go:263] Couldn't find network status for default/mysql-tomcat-586356848-n59qd through plugin: invalid

● kube-proxy.service - Kubernetes Proxy
   Loaded: loaded (/lib/systemd/system/kube-proxy.service; disabled; vendor preset: enabled)
   Active: active (running) since Tue 2017-10-31 14:29:09 CST; 12h ago
     Docs: https://github.com/GoogleCloudPlatform/kubernetes
 Main PID: 10132 (kube-proxy)
    Tasks: 6
   Memory: 19.1M
      CPU: 1min 49.449s
   CGroup: /system.slice/kube-proxy.service
           └─10132 /usr/bin/kube-proxy --logtostderr=true --v=0 --master=http://192.168.56.111:8080

Oct 31 14:29:09 million1 systemd[1]: Started Kubernetes Proxy.
Oct 31 14:29:10 million1 kube-proxy[10132]: I1031 14:29:10.041080   10132 server.go:225] Using iptables Proxier.
Oct 31 14:29:10 million1 kube-proxy[10132]: W1031 14:29:10.047605   10132 server.go:469] Failed to retrieve node info: nodes "million1" not found
Oct 31 14:29:10 million1 kube-proxy[10132]: W1031 14:29:10.047704   10132 proxier.go:293] invalid nodeIP, initializing kube-proxy with 127.0.0.1 as nodeIP
Oct 31 14:29:10 million1 kube-proxy[10132]: W1031 14:29:10.047712   10132 proxier.go:298] clusterCIDR not specified, unable to distinguish between internal and external traffic
Oct 31 14:29:10 million1 kube-proxy[10132]: I1031 14:29:10.047727   10132 server.go:249] Tearing down userspace rules.
Oct 31 14:29:10 million1 kube-proxy[10132]: I1031 14:29:10.184166   10132 conntrack.go:81] Set sysctl 'net/netfilter/nf_conntrack_max' to 131072
Oct 31 14:29:10 million1 kube-proxy[10132]: I1031 14:29:10.184988   10132 conntrack.go:66] Setting conntrack hashsize to 32768
Oct 31 14:29:10 million1 kube-proxy[10132]: I1031 14:29:10.185336   10132 conntrack.go:81] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_established' to 86400
Oct 31 14:29:10 million1 kube-proxy[10132]: I1031 14:29:10.185505   10132 conntrack.go:81] Set sysctl 'net/netfilter/nf_conntrack_tcp_timeout_close_wait' to 3600

Next, verify the network.
Note: first change the default DROP policy of the iptables FORWARD chain, otherwise traffic arriving over the GRE tunnel will be dropped.
root@million1:/etc/kubernetes# iptables -S

-P INPUT ACCEPT

-P FORWARD DROP

-P OUTPUT ACCEPT

Change it to:
root@million1:/etc/kubernetes# iptables -S

-P INPUT ACCEPT

-P FORWARD ACCEPT

-P OUTPUT ACCEPT
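
The change itself is one command; it does not survive a reboot, so it can also go into the kbr0_setup.sh startup script above:
iptables -P FORWARD ACCEPT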

root@master:/app/work/pod# vi Deployment.yaml

apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  replicas: 1
  minReadySeconds: 60     # during a rolling update, a pod counts as ready 60s after it starts
  strategy:
    rollingUpdate:        ## with replicas: 3 a rolling update keeps the pod count between 2 and 4
      maxSurge: 1         # start 1 extra pod first during a rolling update
      maxUnavailable: 1   # max number of unavailable pods during a rolling update
  template:
    metadata:
      labels:
        app: mysql
    spec:
      terminationGracePeriodSeconds: 60  ## k8s sends the app SIGTERM so it can shut down gracefully; default is 30s
      containers:
      - name: mysql
        image: 192.168.56.106:5000/mysql:5.5
        ports:
        - containerPort: 3306
        volumeMounts:
        - name: mysql-vol
          mountPath: /docker-entrypoint-initdb.d
      volumes:
      - name: mysql-vol
        hostPath:
          path: /root

root@master:/app/work/pod# kubectl create -f Deployment.yaml

root@master:/app/work/pod# vi Deployment_mysql_tomcat.yaml

apiVersion: apps/v1beta1
kind: Deployment
metadata:
  name: mysql-tomcat
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: mysql-tomcat
    spec:
      containers:
      - name: mysql-tomcat
        image: 192.168.56.106:5000/mysql_tomcat:1.0
        ports:
        - containerPort: 8080
      nodeSelector:
        name: node109

root@master:/app/work/pod# kubectl create -f Deployment_mysql_tomcat.yaml

Here we first created the mysql pod. Then, to test access across the network, we want the tomcat (the mysql client) deployed on the other node, so we add a label to the node that is not running MySQL:
root@master:/app/work/pod# vi node109

apiVersion: v1
kind: Node
metadata:
  name: 192.168.56.109
  labels:
    environment: production
    name: node109
    role: web

kubectl delete node 192.168.56.109

kubectl create -f node109
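
Deleting and re-creating the node object works, but it wipes the node's default labels (visible in the listing below). The same labels can also be added in place:
kubectl label node 192.168.56.109 environment=production name=node109 role=web

Either way, the node list now shows: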

NAME             STATUS    AGE       VERSION   EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION      LABELS

192.168.56.109   Ready     2h        v1.6.2    <none>        Ubuntu 16.04.3 LTS   4.10.0-37-generic   environment=production,name=node109,role=web

192.168.56.110   Ready     3d        v1.6.2    <none>        Ubuntu 16.04.3 LTS   4.10.0-37-generic   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=192.168.56.110

root@master:/app/work/pod# kubectl get pods -o wide

NAME                            READY     STATUS    RESTARTS   AGE       IP           NODE

mysql-4104382215-bg2hv          1/1       Running   0          3h        172.17.1.2   192.168.56.110

mysql-tomcat-1796349124-sxwln   1/1       Running   0          1h        172.17.2.2   192.168.56.109

The tomcat image's Dockerfile and init script below show how mysql-tomcat-1796349124-sxwln accesses mysql-4104382215-bg2hv:
root@million2:~/test_docker# cat Dockerfile

FROM       192.168.56.106:5000/tomcat:7.0
MAINTAINER billwang <363703011@qq.com>
ADD init /tmp/init
RUN chmod a+x /tmp/init
ENTRYPOINT ["/tmp/init", "/scripts/run"]

root@million2:~/test_docker# cat init

#!/bin/bash
set -e

DB_HOST=${DB_HOST:-172.17.1.2}
DB_USER=${DB_USER:-admin}
DB_PASS=${DB_PASS:-csphere2015}

if ! mysql -u${DB_USER} -p${DB_PASS} -h ${DB_HOST} -e "show databases;" | grep -w xc; then
  mysql -u${DB_USER} -p${DB_PASS} -h ${DB_HOST} -e "create database if not exists xc;"
  mysql -u${DB_USER} -p${DB_PASS} -h ${DB_HOST} xc -e "create table if not exists user(id int(4),name char(20));"
  mysql -u${DB_USER} -p${DB_PASS} -h ${DB_HOST} xc -e "insert into user(id,name) values('1','csphere');"
  mysql -u${DB_USER} -p${DB_PASS} -h ${DB_HOST} xc -e "select * from user;" >> /tomcat/webapps/examples/test.html
fi

exec "$@"

root@million2:~/test_docker# curl 172.17.2.2:8080/examples/test.html

id      name

1       csphere

This shows that tomcat really did reach mysql.
Of course, we can also add a Service and then access it from a browser:
root@master:/app/work/pod# vi mysql_tomcat_service.yaml

apiVersion: v1
kind: Service
metadata:
  name: mysql-tomcat-expose
spec:
  ports:
  - port: 80
    targetPort: 8080
  selector:
    app: mysql-tomcat
  type: NodePort

kubectl create -f mysql_tomcat_service.yaml

root@master:/app/work/pod# kubectl get svc

NAME                  CLUSTER-IP       EXTERNAL-IP   PORT(S)        AGE

kubernetes            10.254.0.1       <none>        443/TCP        3d

mysql-tomcat-expose   10.254.153.133   <nodes>       80:30396/TCP   1h

root@master:/app/work/pod# curl 192.168.56.109:30396/examples/test.html

id      name

1       csphere

root@master:/app/work/pod# curl 192.168.56.110:30396/examples/test.html

id      name

1       csphere





Problems encountered:
1. When setting up the GRE tunnel, the bridge created with brctl disappeared after a reboot, breaking the network. I kept it simple with a startup script; for a persistent bridge it can also go in the interfaces config file.
2. While testing cross-host access, the iptables FORWARD chain's default DROP policy blocked traffic between hosts; change it to ACCEPT.
3. Testing required my own registry, and other machines could not pull from it. This needs the Docker config file:
root@million2:~/test_docker# vi /etc/docker/daemon.json

{
  "insecure-registries": ["192.168.56.106:5000"]
}

or the equivalent flag in /lib/systemd/system/docker.service:

ExecStart=/usr/bin/dockerd -H fd:// -b kbr0 --insecure-registry 192.168.56.106:5000