Preface
This article walks through installing and deploying a single-node Kubernetes (k8s) cluster on Kylin Advanced Server OS V10 (银河麒麟高级服务器操作系统V10), together with some basic kubectl commands.
The deployment scripts used here come mainly from https://github.com/hknarutofk/kubeasz-arm64, an arm64 adaptation of https://github.com/easzlab/kubeasz.
Mirror addresses inside China:
https://git.trustie.net/hknaruto/kubeasz-arm64.git
https://gitee.com/hknarutofk/kubeasz-arm64.git
1. Download the kubeasz-arm64 project
git clone https://git.trustie.net/hknaruto/kubeasz-arm64.git
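If the host cannot reach that address quickly, the same repository can be cloned from the Gitee mirror listed above instead; this is an optional alternative to the command shown, assuming the mirrors are kept in sync:

# Optional: clone from the Gitee mirror instead
git clone https://gitee.com/hknarutofk/kubeasz-arm64.git
cd kubeasz-arm64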
2. Deploy the single-node k8s cluster
Copy the ansible scripts into the /etc directory
[yeqiang@192-168-110-185 桌面]$ cd kubeasz-arm64/
[yeqiang@192-168-110-185 kubeasz-arm64]$ sudo cp etc/ansible/ /etc/ -r
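A quick sanity check I would add here (not part of the original log): confirm the playbooks now sit where easzctl expects them, i.e. under /etc/ansible.

# Verify the ansible roles and config were copied
ls /etc/ansible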
Download the arm64 resources
[yeqiang@192-168-110-185 kubeasz-arm64]$ sudo ./easzup -D
[INFO] Action begin : download_all
[INFO] downloading docker binaries 19.03.8
[INFO] downloading docker binaries 19.03.8
  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
                                 Dload  Upload   Total   Spent    Left  Speed
100 53.9M  100 53.9M    0     0  1999k      0  0:00:27  0:00:27 --:--:-- 2675k
[WARN] docker is already running.
[WARN] kubeasz already existed
[INFO] downloading kubernetes v1.18.3 binaries
v1.18.3: Pulling from hknaruto/easzlab-kubeasz-k8s-bin
941f399634ec: Pull complete
aa2b3983a2ff: Pull complete
Digest: sha256:8f835fd8628086b0fca4c1f8a206c6e65e5dd3d4f634e3284a088545e5edb2f0
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/hknaruto/easzlab-kubeasz-k8s-bin:v1.18.3
registry.cn-hangzhou.aliyuncs.com/hknaruto/easzlab-kubeasz-k8s-bin:v1.18.3
[INFO] run a temporary container
8b6f9732c99c5d761fc93a323a662beeb723bbae0a84dd49b664ce26ee01769a
[INFO] cp k8s binaries
[INFO] stop&remove temporary container
temp_k8s_bin
[INFO] downloading extral binaries kubeasz-ext-bin:0.5.2
0.5.2: Pulling from hknaruto/easzlab-kubeasz-ext-bin
941f399634ec: Already exists
cfc607fad870: Pulling fs layer
2115498b7091: Pulling fs layer
6e27e1bff847: Pull complete
b625303c2cc3: Pull complete
91671aa9bd47: Pull complete
164c7f0e53a8: Pull complete
728cc5df7bfb: Pull complete
6b7774a0bde6: Pull complete
0fb37107d1fa: Pull complete
ea66d491fdd1: Pull complete
a3e774c2ae77: Pull complete
d781ce906d8a: Pull complete
069c33e69879: Pull complete
fe2f2460a2b7: Pull complete
7b2d223b3413: Pull complete
f64dd4a25e3c: Pull complete
3e7e09b40160: Pull complete
f72069b3ad47: Pull complete
39011336cbef: Pull complete
9c4abea5f490: Pull complete
1f773f1865c0: Pull complete
30d34578fa28: Pull complete
bd7bbf798576: Pull complete
d822d8287374: Pull complete
5a88f3133dc2: Pull complete
Digest: sha256:9dd7b290c5d00283997fa79636ef75af6f613af8776277a0b8eeca7d1f6dab23
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/hknaruto/easzlab-kubeasz-ext-bin:0.5.2
registry.cn-hangzhou.aliyuncs.com/hknaruto/easzlab-kubeasz-ext-bin:0.5.2
[INFO] run a temporary container
8070c787f73f9ee063cff4a5686b9c7c7cee8d06f7fc57e0a06d3ce3ddbe8cb8
[INFO] cp extral binaries
[INFO] stop&remove temporary container
temp_ext_bin
[INFO] downloading offline images
v3.8.8-1: Pulling from calico/cni
007027d142c8: Pull complete
0736a45633dd: Pull complete
7b369e9378de: Pull complete
f9ddfb4bcf48: Pull complete
93ae23d295fd: Pull complete
e0e112587ac2: Pull complete
Digest: sha256:b08570f92e5ca7f372e331856c1fc1e731a4b57e394eca45ec8e0b008d8b6ee0
Status: Downloaded newer image for calico/cni:v3.8.8-1
docker.io/calico/cni:v3.8.8-1
v3.8.8: Pulling from calico/pod2daemon-flexvol
788aef77d06b: Pull complete
1400fae2005b: Pull complete
aafaa18c2ba4: Pull complete
Digest: sha256:5e452525444217b7297619d78f4167648bec42242b56322c82a0315c454ffc86
Status: Downloaded newer image for calico/pod2daemon-flexvol:v3.8.8
docker.io/calico/pod2daemon-flexvol:v3.8.8
v3.8.8: Pulling from calico/kube-controllers
b6493e0c8f7e: Pull complete
ee8045068c29: Pull complete
Digest: sha256:40e48544c79bd47299168b327a88a8c6d40c59c5c5969c9bed8251dd02be92e3
Status: Downloaded newer image for calico/kube-controllers:v3.8.8
docker.io/calico/kube-controllers:v3.8.8
v3.8.8-1: Pulling from calico/node
788aef77d06b: Already exists
a6d812a2df88: Pull complete
f05fc8619223: Pull complete
c598b2bf71cc: Pull complete
c2456e3aa60a: Pull complete
dd80e7cd056f: Pull complete
7441056eba94: Pull complete
45737f21924d: Pull complete
4e41f68bc651: Pull complete
Digest: sha256:9615a309f00dfab7270de661bfd559a42e0e6396de4d3d0aa18dcc4a63e1b23a
Status: Downloaded newer image for calico/node:v3.8.8-1
docker.io/calico/node:v3.8.8-1
1.6.7: Pulling from coredns/coredns
c6568d217a00: Pull complete
597f21eeb593: Pull complete
Digest: sha256:2c8d61c46f484d881db43b34d13ca47a269336e576c81cf007ca740fa9ec0800
Status: Downloaded newer image for coredns/coredns:1.6.7
docker.io/coredns/coredns:1.6.7
v2.0.1: Pulling from kubernetesui/dashboard-arm64
a938d0ebf9f3: Pull complete
Digest: sha256:88bf7273d8c93c59499949e02091dc52a20a3b3fb236bb8a27f42d679f2ee95b
Status: Downloaded newer image for kubernetesui/dashboard-arm64:v2.0.1
docker.io/kubernetesui/dashboard-arm64:v2.0.1
v0.12.0: Pulling from kubesphere/flannel
8fa90b21c985: Pull complete
c4b41df13d81: Pull complete
a73758d03943: Pull complete
d09921139b63: Pull complete
17ca61374c07: Pull complete
6da2b4782d50: Pull complete
Digest: sha256:a60e5f494c5f8535b021d27cbe76448be8f61a87421baae0f093a1e563e5f8c6
Status: Downloaded newer image for kubesphere/flannel:v0.12.0
docker.io/kubesphere/flannel:v0.12.0
v1.0.4: Pulling from kubernetesui/metrics-scraper-arm64
45a3d036b512: Pull complete
d4ad31b21cb0: Pull complete
81a334173c0c: Pull complete
Digest: sha256:afbc4844447571d1a2c85c2d8be2601387f99ac25db697adb8167de4e7d21909
Status: Downloaded newer image for kubernetesui/metrics-scraper-arm64:v1.0.4
docker.io/kubernetesui/metrics-scraper-arm64:v1.0.4
v0.3.6: Pulling from mirrorgooglecontainers/metrics-server-arm64
e8d8785a314f: Pull complete
98691cade31f: Pull complete
Digest: sha256:448e86a5914d1de95741aaa71009dac84843e460c13b393fc157b7bc657c2fdf
Status: Downloaded newer image for mirrorgooglecontainers/metrics-server-arm64:v0.3.6
docker.io/mirrorgooglecontainers/metrics-server-arm64:v0.3.6
3.2: Pulling from kubesphere/pause-arm64
84f9968a3238: Pull complete
Digest: sha256:31d3efd12022ffeffb3146bc10ae8beb890c80ed2f07363515580add7ed47636
Status: Downloaded newer image for r6w9c7qa.mirror.aliyuncs.com/kubesphere/pause-arm64:3.2
r6w9c7qa.mirror.aliyuncs.com/kubesphere/pause-arm64:3.2
2.2.1: Pulling from hknaruto/easzlab-kubeasz
941f399634ec: Already exists
405b20ab5afa: Pull complete
Digest: sha256:4bb68276e1d65da636543704d522537b3d02cdf3023d444a59516c01a019497d
Status: Downloaded newer image for registry.cn-hangzhou.aliyuncs.com/hknaruto/easzlab-kubeasz:2.2.1
registry.cn-hangzhou.aliyuncs.com/hknaruto/easzlab-kubeasz:2.2.1
[INFO] Action successed : download_all
[yeqiang@192-168-110-185 kubeasz-arm64]$
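At this point all binaries and offline images are cached locally. A hedged verification step of my own (not in the original log) is to list the local Docker images and confirm the names from the pull log above are present:

# Check that the offline images downloaded by easzup -D are in the local cache
docker images | grep -E 'calico|coredns|dashboard|flannel|metrics|pause|kubeasz'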
Start the kubeasz container
[yeqiang@192-168-110-185 kubeasz-arm64]$ sudo ./easzup -S
[INFO] Action begin : start_kubeasz_docker
[INFO] get host IP: 192.168.110.185
Loaded image: registry.cn-hangzhou.aliyuncs.com/hknaruto/easzlab-kubeasz:2.2.1
[INFO] run kubeasz in a container
b1c4b6e878b76bdc559b74f8d6522e78727168bfe2df4b19b863f79409b73a32
[INFO] Action successed : start_kubeasz_docker
[yeqiang@192-168-110-185 kubeasz-arm64]$
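Before kicking off the deployment it is worth confirming the control container is actually running. This check is my addition and assumes the default container name "kubeasz" created by easzup -S:

# The easzup -S step should have left a container named "kubeasz" running
docker ps --filter name=kubeasz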
Deploy the single-node k8s cluster
Note: switch to the root user first.
[yeqiang@192-168-110-185 kubeasz-arm64]$ sudo su
[root@192-168-110-185 kubeasz-arm64]# docker exec -it kubeasz easzctl start-aio | tee aio.log
[INFO] Action: start an AllInOne cluster : start-aio
[INFO] initialize directory /etc/ansible/.cluster
[INFO] save current context: default
[INFO] save context: default
[INFO] save default roles' configration
[INFO] clean context: default
[INFO] context aio not existed, initialize it using default context
[INFO] change current context to aio
[INFO] install context: aio
[INFO] install aio roles' configration
[INFO] setup cluster with context: aio
[INFO] setup begin in 5s, press any key to abort
:
/usr/lib/python2.7/site-packages/cryptography/__init__.py:39: CryptographyDeprecationWarning: Python 2 is no longer supported by the Python core team. Support for it is now deprecated in cryptography, and will be removed in a future release.
  CryptographyDeprecationWarning,
Using /etc/ansible/ansible.cfg as config file

PLAY [kube-master,kube-node,etcd,ex-lb,chrony] *********************************

TASK [Gathering Facts] *********************************************************
ok: [192.168.110.185]

TASK [chrony : apt更新缓存刷新] ******************************************************

TASK [chrony : apt 卸载 ntp] *****************************************************

TASK [chrony : yum 卸载 ntp] *****************************************************

TASK [chrony : 安装 chrony] ******************************************************

TASK [chrony : 准备离线安装包目录] ******************************************************

TASK [chrony : 分发 chrony_xenial 离线包] *******************************************

TASK [chrony : 安装 chrony_xenial 离线包] *******************************************

TASK [chrony : 分发 chrony_bionic 离线包] *******************************************

TASK [chrony : 安装 chrony_bionic 离线包] *******************************************

TASK [chrony : 分发 chrony_centos7 离线包] ******************************************

TASK [chrony : 安装 chrony_centos7 离线包] ******************************************

TASK [chrony : 分发 chrony_stretch 离线包] ******************************************

TASK [chrony : 安装 chrony_stretch 离线包] ******************************************

TASK [chrony : 分发 chrony_buster 离线包] *******************************************

TASK [chrony : 安装 chrony_buster 离线包] *******************************************

TASK [chrony : 配置 chrony server] ***********************************************

TASK [chrony : 配置 chrony server] ***********************************************

TASK [chrony : 启动 chrony server] ***********************************************

TASK [chrony : 启动 chrony server] ***********************************************

TASK [chrony : 配置 chrony client] ***********************************************

TASK [chrony : 配置 chrony client] ***********************************************

TASK [chrony : 启动 chrony client] ***********************************************

TASK [chrony : 启动 chrony client] ***********************************************

PLAY [localhost] ***************************************************************

TASK [Gathering Facts] *********************************************************
ok: [localhost]

TASK [deploy : prepare some dirs] **********************************************
changed: [localhost] => (item=/etc/ansible/.cluster/ssl) => {"changed": true, "gid": 0, "group": "root", "item": "/etc/ansible/.cluster/ssl", "mode": "0750", "owner": "root", "path": "/etc/ansible/.cluster/ssl", "size": 6, "state": "directory", "uid": 0}
changed: [localhost] => (item=/etc/ansible/.cluster/backup) => {"changed": true, "gid": 0, "group": "root", "item": "/etc/ansible/.cluster/backup", "mode": "0750", "owner": "root", "path": "/etc/ansible/.cluster/backup", "size": 6, "state": "directory", "uid": 0}

TASK [deploy : 本地设置 bin 目录权限] **************************************************
changed: [localhost] => {"changed": true, "gid": 0, "group": "root", "mode": "0755", "owner": "root", "path": "/etc/ansible/bin", "size": 4096, "state": "directory", "uid": 0}

TASK [deploy : 读取ca证书stat信息] ***************************************************
ok: [localhost] => {"changed": false, "stat": {"exists": false}}

TASK [deploy : 准备CA配置文件和签名请求] **************************************************
changed: [localhost] => (item=ca-config.json) => {"changed": true, "checksum": "24e9422c9c2462295c458129016d10ae6d8b5327", "dest": "/etc/ansible/.cluster/ssl/ca-config.json", "gid": 0, "group": "root", "item": "ca-config.json", "md5sum": "49df98e6482eefad0d0bfa0fad148033", "mode": "0640", "owner": "root", "size": 294, "src": "/root/.ansible/tmp/ansible-tmp-1611285184.12-196234004205295/source", "state": "file", "uid": 0}
changed: [localhost] => (item=ca-csr.json) => {"changed": true, "checksum": "dc9dff1628b6558a24b83c2b259d54ab050e7e94", "dest": "/etc/ansible/.cluster/ssl/ca-csr.json", "gid": 0, "group": "root", "item": "ca-csr.json", "md5sum": "33d0182affeaebdef871493633efe886", "mode": "0640", "owner": "root", "size": 243, "src": "/root/.ansible/tmp/ansible-tmp-1611285184.87-233961476562925/source", "state": "file", "uid": 0}

TASK [deploy : 生成 CA 证书和私钥] ****************************************************
changed: [localhost] => {"changed": true, "cmd": "cd /etc/ansible/.cluster/ssl && /etc/ansible/bin/cfssl gencert -initca ca-csr.json | /etc/ansible/bin/cfssljson -bare ca", "delta": "0:00:01.481495", "end": "2021-01-22 03:13:07.481172", "rc": 0, "start": "2021-01-22 03:13:05.999677", "stderr": "2021/01/22 03:13:06 [INFO] generating a new CA key and certificate from CSR\n2021/01/22 03:13:06 [INFO] generate received request\n2021/01/22 03:13:06 [INFO] received CSR\n2021/01/22 03:13:06 [INFO] generating key: rsa-2048\n2021/01/22 03:13:07 [INFO] encoded CSR\n2021/01/22 03:13:07 [INFO] signed certificate with serial number 563349098259652949671967805166757570302970741351", "stderr_lines": ["2021/01/22 03:13:06 [INFO] generating a new CA key and certificate from CSR", "2021/01/22 03:13:06 [INFO] generate received request", "2021/01/22 03:13:06 [INFO] received CSR", "2021/01/22 03:13:06 [INFO] generating key: rsa-2048", "2021/01/22 03:13:07 [INFO] encoded CSR", "2021/01/22 03:13:07 [INFO] signed certificate with serial number 563349098259652949671967805166757570302970741351"], "stdout": "", "stdout_lines": []}

TASK [deploy : 删除原有kubeconfig] *************************************************
ok: [localhost] => {"changed": false, "path": "/root/.kube/config", "state": "absent"}

TASK [deploy : 下载 group:read rbac 文件] ******************************************

TASK [deploy : 创建 group:read rbac 绑定] ******************************************

TASK [deploy : 准备kubectl使用的 admin证书签名请求] ***************************************
changed: [localhost] => {"changed": true, "checksum": "70668d7280da49ae027d50242668c23a57a499e5", "dest": "/etc/ansible/.cluster/ssl/admin-csr.json", "gid": 0, "group": "root", "md5sum": "cc0d74cf52c857a45f8eca0a5aa6ffa8", "mode": "0640", "owner": "root", "size": 225, "src": "/root/.ansible/tmp/ansible-tmp-1611285188.21-74358079049393/source", "state": "file", "uid": 0}

TASK [deploy : 创建 admin证书与私钥] **************************************************
changed: [localhost] => {"changed": true, "cmd": "cd /etc/ansible/.cluster/ssl && /etc/ansible/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | /etc/ansible/bin/cfssljson -bare admin", "delta": "0:00:01.008021", "end": "2021-01-22 03:13:10.027553", "rc": 0, "start": "2021-01-22 03:13:09.019532", "stderr": "2021/01/22 03:13:09 [INFO] generate received request\n2021/01/22 03:13:09 [INFO] received CSR\n2021/01/22 03:13:09 [INFO] generating key: rsa-2048\n2021/01/22 03:13:10 [INFO] encoded CSR\n2021/01/22 03:13:10 [INFO] signed certificate with serial number 45917087129669289466907837540257905097561250356\n2021/01/22 03:13:10 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for\nwebsites. For more information see the Baseline Requirements for the Issuance and Management\nof Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);\nspecifically, section 10.2.3 (\"Information Requirements\").", "stderr_lines": ["2021/01/22 03:13:09 [INFO] generate received request", "2021/01/22 03:13:09 [INFO] received CSR", "2021/01/22 03:13:09 [INFO] generating key: rsa-2048", "2021/01/22 03:13:10 [INFO] encoded CSR", "2021/01/22 03:13:10 [INFO] signed certificate with serial number 45917087129669289466907837540257905097561250356", "2021/01/22 03:13:10 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for", "websites. For more information see the Baseline Requirements for the Issuance and Management", "of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);", "specifically, section 10.2.3 (\"Information Requirements\")."], "stdout": "", "stdout_lines": []}

TASK [deploy : 设置集群参数] *********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-cluster cluster1 --certificate-authority=/etc/ansible/.cluster/ssl/ca.pem --embed-certs=true --server=https://192.168.110.185:6443", "delta": "0:00:00.672177", "end": "2021-01-22 03:13:11.078338", "rc": 0, "start": "2021-01-22 03:13:10.406161", "stderr": "", "stderr_lines": [], "stdout": "Cluster \"cluster1\" set.", "stdout_lines": ["Cluster \"cluster1\" set."]}

TASK [deploy : 设置客户端认证参数] ******************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-credentials admin --client-certificate=/etc/ansible/.cluster/ssl/admin.pem --embed-certs=true --client-key=/etc/ansible/.cluster/ssl/admin-key.pem", "delta": "0:00:00.667532", "end": "2021-01-22 03:13:12.124043", "rc": 0, "start": "2021-01-22 03:13:11.456511", "stderr": "", "stderr_lines": [], "stdout": "User \"admin\" set.", "stdout_lines": ["User \"admin\" set."]}

TASK [deploy : 设置上下文参数] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-context context-cluster1-admin --cluster=cluster1 --user=admin", "delta": "0:00:00.718384", "end": "2021-01-22 03:13:13.177529", "rc": 0, "start": "2021-01-22 03:13:12.459145", "stderr": "", "stderr_lines": [], "stdout": "Context \"context-cluster1-admin\" created.", "stdout_lines": ["Context \"context-cluster1-admin\" created."]}

TASK [deploy : 选择默认上下文] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config use-context context-cluster1-admin", "delta": "0:00:00.689884", "end": "2021-01-22 03:13:14.197284", "rc": 0, "start": "2021-01-22 03:13:13.507400", "stderr": "", "stderr_lines": [], "stdout": "Switched to context \"context-cluster1-admin\".", "stdout_lines": ["Switched to context \"context-cluster1-admin\"."]}

TASK [deploy : 准备kube-proxy 证书签名请求] ********************************************
changed: [localhost] => {"changed": true, "checksum": "a3425da0c42fa4a407f6efa4d0e596b8190994ac", "dest": "/etc/ansible/.cluster/ssl/kube-proxy-csr.json", "gid": 0, "group": "root", "md5sum": "f5c41965b027030973a528cdf0839475", "mode": "0640", "owner": "root", "size": 226, "src": "/root/.ansible/tmp/ansible-tmp-1611285194.36-215825752042009/source", "state": "file", "uid": 0}

TASK [deploy : 创建 kube-proxy证书与私钥] *********************************************
changed: [localhost] => {"changed": true, "cmd": "cd /etc/ansible/.cluster/ssl && /etc/ansible/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | /etc/ansible/bin/cfssljson -bare kube-proxy", "delta": "0:00:01.375494", "end": "2021-01-22 03:13:16.528536", "rc": 0, "start": "2021-01-22 03:13:15.153042", "stderr": "2021/01/22 03:13:15 [INFO] generate received request\n2021/01/22 03:13:15 [INFO] received CSR\n2021/01/22 03:13:15 [INFO] generating key: rsa-2048\n2021/01/22 03:13:16 [INFO] encoded CSR\n2021/01/22 03:13:16 [INFO] signed certificate with serial number 7829389959775856027511225334782039638713905904\n2021/01/22 03:13:16 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for\nwebsites. For more information see the Baseline Requirements for the Issuance and Management\nof Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);\nspecifically, section 10.2.3 (\"Information Requirements\").", "stderr_lines": ["2021/01/22 03:13:15 [INFO] generate received request", "2021/01/22 03:13:15 [INFO] received CSR", "2021/01/22 03:13:15 [INFO] generating key: rsa-2048", "2021/01/22 03:13:16 [INFO] encoded CSR", "2021/01/22 03:13:16 [INFO] signed certificate with serial number 7829389959775856027511225334782039638713905904", "2021/01/22 03:13:16 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for", "websites. For more information see the Baseline Requirements for the Issuance and Management", "of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);", "specifically, section 10.2.3 (\"Information Requirements\")."], "stdout": "", "stdout_lines": []}

TASK [deploy : 设置集群参数] *********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-cluster kubernetes --certificate-authority=/etc/ansible/.cluster/ssl/ca.pem --embed-certs=true --server=https://192.168.110.185:6443 --kubeconfig=/etc/ansible/.cluster/kube-proxy.kubeconfig", "delta": "0:00:00.656262", "end": "2021-01-22 03:13:17.535080", "rc": 0, "start": "2021-01-22 03:13:16.878818", "stderr": "", "stderr_lines": [], "stdout": "Cluster \"kubernetes\" set.", "stdout_lines": ["Cluster \"kubernetes\" set."]}

TASK [deploy : 设置客户端认证参数] ******************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-credentials kube-proxy --client-certificate=/etc/ansible/.cluster/ssl/kube-proxy.pem --client-key=/etc/ansible/.cluster/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=/etc/ansible/.cluster/kube-proxy.kubeconfig", "delta": "0:00:00.660415", "end": "2021-01-22 03:13:18.531150", "rc": 0, "start": "2021-01-22 03:13:17.870735", "stderr": "", "stderr_lines": [], "stdout": "User \"kube-proxy\" set.", "stdout_lines": ["User \"kube-proxy\" set."]}

TASK [deploy : 设置上下文参数] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=/etc/ansible/.cluster/kube-proxy.kubeconfig", "delta": "0:00:00.662868", "end": "2021-01-22 03:13:19.524466", "rc": 0, "start": "2021-01-22 03:13:18.861598", "stderr": "", "stderr_lines": [], "stdout": "Context \"default\" created.", "stdout_lines": ["Context \"default\" created."]}

TASK [deploy : 选择默认上下文] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config use-context default --kubeconfig=/etc/ansible/.cluster/kube-proxy.kubeconfig", "delta": "0:00:00.671991", "end": "2021-01-22 03:13:20.529033", "rc": 0, "start": "2021-01-22 03:13:19.857042", "stderr": "", "stderr_lines": [], "stdout": "Switched to context \"default\".", "stdout_lines": ["Switched to context \"default\"."]}

TASK [deploy : 准备kube-controller-manager 证书签名请求] *******************************
changed: [localhost] => {"changed": true, "checksum": "6165a16ac692dba54f87507df4b6a27fedf7cb62", "dest": "/etc/ansible/.cluster/ssl/kube-controller-manager-csr.json", "gid": 0, "group": "root", "md5sum": "2b6e55be4c6b54d57ce340209073a3ed", "mode": "0640", "owner": "root", "size": 266, "src": "/root/.ansible/tmp/ansible-tmp-1611285200.7-105996898081584/source", "state": "file", "uid": 0}

TASK [deploy : 创建 kube-controller-manager证书与私钥] ********************************
changed: [localhost] => {"changed": true, "cmd": "cd /etc/ansible/.cluster/ssl && /etc/ansible/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | /etc/ansible/bin/cfssljson -bare kube-controller-manager", "delta": "0:00:01.402277", "end": "2021-01-22 03:13:22.900391", "rc": 0, "start": "2021-01-22 03:13:21.498114", "stderr": "2021/01/22 03:13:22 [INFO] generate received request\n2021/01/22 03:13:22 [INFO] received CSR\n2021/01/22 03:13:22 [INFO] generating key: rsa-2048\n2021/01/22 03:13:22 [INFO] encoded CSR\n2021/01/22 03:13:22 [INFO] signed certificate with serial number 680027304130350542981131508914649003440343666124\n2021/01/22 03:13:22 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for\nwebsites. For more information see the Baseline Requirements for the Issuance and Management\nof Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);\nspecifically, section 10.2.3 (\"Information Requirements\").", "stderr_lines": ["2021/01/22 03:13:22 [INFO] generate received request", "2021/01/22 03:13:22 [INFO] received CSR", "2021/01/22 03:13:22 [INFO] generating key: rsa-2048", "2021/01/22 03:13:22 [INFO] encoded CSR", "2021/01/22 03:13:22 [INFO] signed certificate with serial number 680027304130350542981131508914649003440343666124", "2021/01/22 03:13:22 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for", "websites. For more information see the Baseline Requirements for the Issuance and Management", "of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);", "specifically, section 10.2.3 (\"Information Requirements\")."], "stdout": "", "stdout_lines": []}

TASK [deploy : 设置集群参数] *********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-cluster kubernetes --certificate-authority=/etc/ansible/.cluster/ssl/ca.pem --embed-certs=true --server=https://192.168.110.185:6443 --kubeconfig=/etc/ansible/.cluster/kube-controller-manager.kubeconfig", "delta": "0:00:00.663708", "end": "2021-01-22 03:13:23.898026", "rc": 0, "start": "2021-01-22 03:13:23.234318", "stderr": "", "stderr_lines": [], "stdout": "Cluster \"kubernetes\" set.", "stdout_lines": ["Cluster \"kubernetes\" set."]}

TASK [deploy : 设置认证参数] *********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-credentials system:kube-controller-manager --client-certificate=/etc/ansible/.cluster/ssl/kube-controller-manager.pem --client-key=/etc/ansible/.cluster/ssl/kube-controller-manager-key.pem --embed-certs=true --kubeconfig=/etc/ansible/.cluster/kube-controller-manager.kubeconfig", "delta": "0:00:00.667449", "end": "2021-01-22 03:13:24.947160", "rc": 0, "start": "2021-01-22 03:13:24.279711", "stderr": "", "stderr_lines": [], "stdout": "User \"system:kube-controller-manager\" set.", "stdout_lines": ["User \"system:kube-controller-manager\" set."]}

TASK [deploy : 设置上下文参数] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-context default --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/etc/ansible/.cluster/kube-controller-manager.kubeconfig", "delta": "0:00:00.676281", "end": "2021-01-22 03:13:25.959394", "rc": 0, "start": "2021-01-22 03:13:25.283113", "stderr": "", "stderr_lines": [], "stdout": "Context \"default\" created.", "stdout_lines": ["Context \"default\" created."]}

TASK [deploy : 选择默认上下文] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config use-context default --kubeconfig=/etc/ansible/.cluster/kube-controller-manager.kubeconfig", "delta": "0:00:00.938509", "end": "2021-01-22 03:13:27.258197", "rc": 0, "start": "2021-01-22 03:13:26.319688", "stderr": "", "stderr_lines": [], "stdout": "Switched to context \"default\".", "stdout_lines": ["Switched to context \"default\"."]}

TASK [deploy : 准备kube-scheduler 证书签名请求] ****************************************
changed: [localhost] => {"changed": true, "checksum": "b00f1baf902015882735e4d46b16a0996a214fef", "dest": "/etc/ansible/.cluster/ssl/kube-scheduler-csr.json", "gid": 0, "group": "root", "md5sum": "ce544519fa8c775bbcb37ed9d13562a2", "mode": "0640", "owner": "root", "size": 248, "src": "/root/.ansible/tmp/ansible-tmp-1611285207.43-225019440986446/source", "state": "file", "uid": 0}

TASK [deploy : 创建 kube-scheduler证书与私钥] *****************************************
changed: [localhost] => {"changed": true, "cmd": "cd /etc/ansible/.cluster/ssl && /etc/ansible/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | /etc/ansible/bin/cfssljson -bare kube-scheduler", "delta": "0:00:01.441404", "end": "2021-01-22 03:13:29.695420", "rc": 0, "start": "2021-01-22 03:13:28.254016", "stderr": "2021/01/22 03:13:28 [INFO] generate received request\n2021/01/22 03:13:28 [INFO] received CSR\n2021/01/22 03:13:28 [INFO] generating key: rsa-2048\n2021/01/22 03:13:29 [INFO] encoded CSR\n2021/01/22 03:13:29 [INFO] signed certificate with serial number 133950420575820641993840119993846159410414994381\n2021/01/22 03:13:29 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for\nwebsites. For more information see the Baseline Requirements for the Issuance and Management\nof Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);\nspecifically, section 10.2.3 (\"Information Requirements\").", "stderr_lines": ["2021/01/22 03:13:28 [INFO] generate received request", "2021/01/22 03:13:28 [INFO] received CSR", "2021/01/22 03:13:28 [INFO] generating key: rsa-2048", "2021/01/22 03:13:29 [INFO] encoded CSR", "2021/01/22 03:13:29 [INFO] signed certificate with serial number 133950420575820641993840119993846159410414994381", "2021/01/22 03:13:29 [WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for", "websites. For more information see the Baseline Requirements for the Issuance and Management", "of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);", "specifically, section 10.2.3 (\"Information Requirements\")."], "stdout": "", "stdout_lines": []}

TASK [deploy : 设置集群参数] *********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-cluster kubernetes --certificate-authority=/etc/ansible/.cluster/ssl/ca.pem --embed-certs=true --server=https://192.168.110.185:6443 --kubeconfig=/etc/ansible/.cluster/kube-scheduler.kubeconfig", "delta": "0:00:00.694388", "end": "2021-01-22 03:13:30.801286", "rc": 0, "start": "2021-01-22 03:13:30.106898", "stderr": "", "stderr_lines": [], "stdout": "Cluster \"kubernetes\" set.", "stdout_lines": ["Cluster \"kubernetes\" set."]}

TASK [deploy : 设置认证参数] *********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-credentials system:kube-scheduler --client-certificate=/etc/ansible/.cluster/ssl/kube-scheduler.pem --client-key=/etc/ansible/.cluster/ssl/kube-scheduler-key.pem --embed-certs=true --kubeconfig=/etc/ansible/.cluster/kube-scheduler.kubeconfig", "delta": "0:00:00.707627", "end": "2021-01-22 03:13:31.853071", "rc": 0, "start": "2021-01-22 03:13:31.145444", "stderr": "", "stderr_lines": [], "stdout": "User \"system:kube-scheduler\" set.", "stdout_lines": ["User \"system:kube-scheduler\" set."]}

TASK [deploy : 设置上下文参数] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config set-context default --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=/etc/ansible/.cluster/kube-scheduler.kubeconfig", "delta": "0:00:00.691414", "end": "2021-01-22 03:13:32.887167", "rc": 0, "start": "2021-01-22 03:13:32.195753", "stderr": "", "stderr_lines": [], "stdout": "Context \"default\" created.", "stdout_lines": ["Context \"default\" created."]}

TASK [deploy : 选择默认上下文] ********************************************************
changed: [localhost] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl config use-context default --kubeconfig=/etc/ansible/.cluster/kube-scheduler.kubeconfig", "delta": "0:00:00.696396", "end": "2021-01-22 03:13:33.927528", "rc": 0, "start": "2021-01-22 03:13:33.231132", "stderr": "", "stderr_lines": [], "stdout": "Switched to context \"default\".", "stdout_lines": ["Switched to context \"default\"."]}

TASK [deploy : 本地创建 easzctl 工具的软连接] ********************************************
ok: [localhost] => {"changed": false, "dest": "/usr/bin/easzctl", "gid": 0, "group": "root", "mode": "0777", "owner": "root", "size": 26, "src": "/etc/ansible/tools/easzctl", "state": "link", "uid": 0}

TASK [deploy : ansible 控制端创建 kubectl 软链接] **************************************
changed: [localhost] => {"changed": true, "dest": "/usr/bin/kubectl", "gid": 0, "group": "root", "mode": "0777", "owner": "root", "size": 24, "src": "/etc/ansible/bin/kubectl", "state": "link", "uid": 0}

TASK [deploy : 注册变量以判断是否容器化运行ansible控制端] ***************************************
changed: [localhost] => {"changed": true, "cmd": "ps aux|wc -l", "delta": "0:00:00.599562", "end": "2021-01-22 03:13:35.561952", "rc": 0, "start": "2021-01-22 03:13:34.962390", "stderr": "", "stderr_lines": [], "stdout": "15", "stdout_lines": ["15"]}

TASK [deploy : ansible 控制端写入环境变量$PATH] *****************************************

TASK [deploy : ansible 控制端添加 kubectl 自动补全] *************************************

TASK [deploy : pip install netaddr] ********************************************

PLAY [kube-master,kube-node,etcd] **********************************************

TASK [prepare : apt更新缓存刷新] *****************************************************

TASK [prepare : 删除ubuntu默认安装] **************************************************

TASK [prepare : 安装 ubuntu/debian基础软件] ******************************************

TASK [prepare : 准备离线安装包目录] *****************************************************

TASK [prepare : 分发 basic_xenial 离线包] *******************************************

TASK [prepare : 安装 basic_xenial 离线包] *******************************************

TASK [prepare : 分发 basic_bionic 离线包] *******************************************

TASK [prepare : 安装 basic_bionic 离线包] *******************************************

TASK [prepare : 分发 basic_centos7 离线包] ******************************************

TASK [prepare : 安装 basic_centos7 离线包] ******************************************

TASK [prepare : 分发 basic_stretch 离线包] ******************************************

TASK [prepare : 安装 basic_stretch 离线包] ******************************************

TASK [prepare : 分发 basic_buster 离线包] *******************************************

TASK [prepare : 安装 basic_buster 离线包] *******************************************

TASK [prepare : 准备 journal 日志相关目录] *********************************************

TASK [prepare : 优化设置 journal 日志] ***********************************************

TASK [prepare : 重启 journald 服务] ************************************************

TASK [prepare : 删除centos/redhat默认安装] *******************************************

TASK [prepare : 添加Amazon EPEL仓库] ***********************************************

TASK [prepare : 安装基础软件包] *******************************************************

TASK [prepare : 准备离线安装包目录] *****************************************************

TASK [prepare : 分发 basic_xenial 离线包] *******************************************

TASK [prepare : 安装 basic_xenial 离线包] *******************************************

TASK [prepare : 分发 basic_bionic 离线包] *******************************************

TASK [prepare : 安装 basic_bionic 离线包] *******************************************

TASK [prepare : 分发 basic_centos7 离线包] ******************************************

TASK [prepare : 安装 basic_centos7 离线包] ******************************************

TASK [prepare : 分发 basic_stretch 离线包] ******************************************

TASK [prepare : 安装 basic_stretch 离线包] ******************************************

TASK [prepare : 分发 basic_buster 离线包] *******************************************

TASK [prepare : 安装 basic_buster 离线包] *******************************************

TASK [prepare : 临时关闭 selinux] **************************************************

TASK [prepare : 永久关闭 selinux] **************************************************

TASK [prepare : 禁止rsyslog获取journald日志 1] ***************************************

TASK [prepare : 禁止rsyslog获取journald日志 2] ***************************************

TASK [prepare : 重启rsyslog服务] ***************************************************

TASK [prepare : 禁用系统 swap] *****************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "swapoff -a && sysctl -w vm.swappiness=0", "delta": "0:00:01.051181", "end": "2021-01-22 11:13:43.491181", "rc": 0, "start": "2021-01-22 11:13:42.440000", "stderr": "", "stderr_lines": [], "stdout": "vm.swappiness = 0", "stdout_lines": ["vm.swappiness = 0"]}

TASK [prepare : 删除fstab swap 相关配置] *********************************************
ok: [192.168.110.185] => {"backup": "", "changed": false, "found": 0, "msg": ""}

TASK [prepare : 转换内核版本为浮点数] ****************************************************
ok: [192.168.110.185] => {"ansible_facts": {"KERNEL_VER": "4.19"}, "changed": false}

TASK [prepare : 设置 nf_conntrack 模块名] *******************************************
ok: [192.168.110.185] => {"ansible_facts": {"NF_CONNTRACK": "nf_conntrack"}, "changed": false}

TASK [prepare : 设置 nf_conntrack_ipv4 模块名] **************************************

TASK [prepare : 加载内核模块] ********************************************************
ok: [192.168.110.185] => (item=br_netfilter) => {"changed": false, "item": "br_netfilter", "name": "br_netfilter", "params": "", "state": "present"}
changed: [192.168.110.185] => (item=ip_vs) => {"changed": true, "item": "ip_vs", "name": "ip_vs", "params": "", "state": "present"}
changed: [192.168.110.185] => (item=ip_vs_rr) => {"changed": true, "item": "ip_vs_rr", "name": "ip_vs_rr", "params": "", "state": "present"}
changed: [192.168.110.185] => (item=ip_vs_wrr) => {"changed": true, "item": "ip_vs_wrr", "name": "ip_vs_wrr", "params": "", "state": "present"}
changed: [192.168.110.185] => (item=ip_vs_sh) => {"changed": true, "item": "ip_vs_sh", "name": "ip_vs_sh", "params": "", "state": "present"}
ok: [192.168.110.185] => (item=nf_conntrack) => {"changed": false, "item": "nf_conntrack", "name": "nf_conntrack", "params": "", "state": "present"}

TASK [prepare : 启用systemd自动加载模块服务] *********************************************
ok: [192.168.110.185] => {"changed": false, "enabled": true, "name": "systemd-modules-load", "status": {"ActiveEnterTimestamp": "Fri 2021-01-22 11:02:26 CST", "ActiveEnterTimestampMonotonic": "7655161", "ActiveExitTimestamp": "Fri 2021-01-22 11:02:25 CST", "ActiveExitTimestampMonotonic": "6320364", "ActiveState": "active", "After": "systemd-journald.socket system.slice", "AllowIsolate": "no", "AmbientCapabilities": "", "AssertResult": "yes", "AssertTimestamp": "Fri 2021-01-22 11:02:26 CST", "AssertTimestampMonotonic": "7344327", "Before": "shutdown.target sys-fs-fuse-connections.mount sysinit.target systemd-sysctl.service sys-kernel-config.mount", "BlockIOAccounting": "no", "BlockIOWeight": "[not set]", "CPUAccounting": "no", "CPUAffinity": "", "CPUQuotaPerSecUSec": "infinity", "CPUQuotaPeriodUSec": "infinity", "CPUSchedulingPolicy": "0", "CPUSchedulingPriority": "0", "CPUSchedulingResetOnFork": "no", "CPUShares": "[not set]", "CPUUsageNSec": "[not set]", "CPUWeight": "[not set]", "CacheDirectoryMode": "0755", "CanIsolate": "no", "CanReload": "no", "CanStart": "yes", "CanStop": "yes", "CapabilityBoundingSet": "cap_chown cap_dac_override cap_dac_read_search cap_fowner cap_fsetid cap_kill cap_setgid cap_setuid cap_setpcap cap_linux_immutable cap_net_bind_service cap_net_broadcast cap_net_admin cap_net_raw cap_ipc_lock cap_ipc_owner cap_sys_module cap_sys_rawio cap_sys_chroot cap_sys_ptrace cap_sys_pacct cap_sys_admin cap_sys_boot cap_sys_nice cap_sys_resource cap_sys_time cap_sys_tty_config cap_mknod cap_lease cap_audit_write cap_audit_control cap_setfcap cap_mac_override cap_mac_admin cap_syslog cap_wake_alarm cap_block_suspend cap_audit_read", "CleanResult": "success", "CollectMode": "inactive", "ConditionResult": "yes", "ConditionTimestamp": "Fri 2021-01-22 11:02:26 CST", "ConditionTimestampMonotonic": "7342989", "ConfigurationDirectoryMode": "0755", "Conflicts": "shutdown.target", "ControlGroup": "/system.slice/systemd-modules-load.service", "ControlPID": "0", "DefaultDependencies": "no", "DefaultMemoryLow": "0", "DefaultMemoryMin": "0", "Delegate": "no", "Description": "Load Kernel Modules", "DevicePolicy": "auto", "Documentation": "man:systemd-modules-load.service(8) man:modules-load.d(5)", "DynamicUser": "no", "ExecMainCode": "1", "ExecMainExitTimestamp": "Fri 2021-01-22 11:02:26 CST", "ExecMainExitTimestampMonotonic": "7653966", "ExecMainPID": "643", "ExecMainStartTimestamp": "Fri 2021-01-22 11:02:26 CST", "ExecMainStartTimestampMonotonic": "7346329", "ExecMainStatus": "0", "ExecStart": "{ path=/usr/lib/systemd/systemd-modules-load ; argv[]=/usr/lib/systemd/systemd-modules-load ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "ExecStartEx": "{ path=/usr/lib/systemd/systemd-modules-load ; argv[]=/usr/lib/systemd/systemd-modules-load ; flags= ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }", "FailureAction": "none", "FileDescriptorStoreMax": "0", "FinalKillSignal": "9", "FragmentPath": "/usr/lib/systemd/system/systemd-modules-load.service", "GID": "[not set]", "GuessMainPID": "yes", "IOAccounting": "no", "IOReadBytes": "18446744073709551615", "IOReadOperations": "18446744073709551615", "IOSchedulingClass": "0", "IOSchedulingPriority": "0", "IOWeight": "[not set]", "IOWriteBytes": "18446744073709551615", "IOWriteOperations": "18446744073709551615", "IPAccounting": "no", "IPEgressBytes": "[no data]", "IPEgressPackets": "[no data]", "IPIngressBytes": "[no data]", "IPIngressPackets": "[no data]", "Id": "systemd-modules-load.service", "IgnoreOnIsolate": "no", "IgnoreSIGPIPE": "yes", "InactiveEnterTimestamp": "Fri 2021-01-22 11:02:25 CST", "InactiveEnterTimestampMonotonic": "6320364", "InactiveExitTimestamp": "Fri 2021-01-22 11:02:26 CST", "InactiveExitTimestampMonotonic": "7346496", "InvocationID": "ef5421c089ad42f8bfe7f54223e496d8", "JobRunningTimeoutUSec": "infinity", "JobTimeoutAction": "none", "JobTimeoutUSec": "infinity", "KeyringMode": "private", "KillMode": "control-group", "KillSignal": "15", "LimitAS": "infinity", "LimitASSoft": "infinity", "LimitCORE": "infinity", "LimitCORESoft": "infinity", "LimitCPU": "infinity", "LimitCPUSoft": "infinity", "LimitDATA": "infinity", "LimitDATASoft": "infinity", "LimitFSIZE": "infinity", "LimitFSIZESoft": "infinity", "LimitLOCKS": "infinity", "LimitLOCKSSoft": "infinity", "LimitMEMLOCK": "65536", "LimitMEMLOCKSoft": "65536", "LimitMSGQUEUE": "819200", "LimitMSGQUEUESoft": "819200", "LimitNICE": "0", "LimitNICESoft": "0", "LimitNOFILE": "100000", "LimitNOFILESoft": "100000", "LimitNPROC": "100000", "LimitNPROCSoft": "100000", "LimitRSS": "infinity", "LimitRSSSoft": "infinity", "LimitRTPRIO": "0", "LimitRTPRIOSoft": "0", "LimitRTTIME": "infinity", "LimitRTTIMESoft": "infinity", "LimitSIGPENDING": "60864", "LimitSIGPENDINGSoft": "60864", "LimitSTACK": "infinity", "LimitSTACKSoft": "8388608", "LoadState": "loaded", "LockPersonality": "no", "LogLevelMax": "-1", "LogRateLimitBurst": "0", "LogRateLimitIntervalUSec": "0", "LogsDirectoryMode": "0755", "MainPID": "0", "MemoryAccounting": "yes", "MemoryCurrent": "0", "MemoryDenyWriteExecute": "no", "MemoryHigh": "infinity", "MemoryLimit": "infinity", "MemoryLow": "0", "MemoryMax": "infinity", "MemoryMin": "0", "MemorySwapMax": "infinity", "MountAPIVFS": "no", "MountFlags": "", "NFileDescriptorStore": "0", "NRestarts": "0", "NUMAMask": "", "NUMAPolicy": "n/a", "Names": "systemd-modules-load.service", "NeedDaemonReload": "no", "Nice": "0", "NoNewPrivileges": "no", "NonBlocking": "no", "NotifyAccess": "none", "OOMPolicy": "stop", "OOMScoreAdjust": "0", "OnFailureJobMode": "replace", "Perpetual": "no", "PrivateDevices": "no", "PrivateMounts": "no", "PrivateNetwork": "no", "PrivateTmp": "no", "PrivateUsers": "no", "ProtectControlGroups": "no", "ProtectHome": "no", "ProtectHostname": "no", "ProtectKernelModules": "no", "ProtectKernelTunables": "no", "ProtectSystem": "no", "RefuseManualStart": "no", "RefuseManualStop": "no", "ReloadResult": "success", "RemainAfterExit": "yes", "RemoveIPC": "no", "Requires": "system.slice", "Restart": "no", "RestartUSec": "100ms", "RestrictNamespaces": "no", "RestrictRealtime": "no", "RestrictSUIDSGID": "no", "Result": "success", "RootDirectoryStartOnly": "no", "RuntimeDirectoryMode": "0755", "RuntimeDirectoryPreserve": "no", "RuntimeMaxUSec": "infinity", "SameProcessGroup": "no", "SecureBits": "0", "SendSIGHUP": "no", "SendSIGKILL": "yes", "Slice": "system.slice", "StandardError": "inherit", "StandardInput": "null", "StandardInputData": "", "StandardOutput": "journal", "StartLimitAction": "none", "StartLimitBurst": "5", "StartLimitIntervalUSec": "10s", "StartupBlockIOWeight": "[not set]", "StartupCPUShares": "[not set]", "StartupCPUWeight": "[not set]", "StartupIOWeight": "[not set]", "StateChangeTimestamp": "Fri 2021-01-22 11:02:26 CST", "StateChangeTimestampMonotonic": "7655161", "StateDirectoryMode": "0755", "StatusErrno": "0", "StopWhenUnneeded": "no", "SubState": "exited", "SuccessAction": "none", "SyslogFacility": "3", "SyslogLevel": "6", "SyslogLevelPrefix": "yes", "SyslogPriority": "30", "SystemCallErrorNumber": "0", "TTYReset": "no", "TTYVHangup": "no", "TTYVTDisallocate": "no", "TasksAccounting": "yes", "TasksCurrent": "0", "TasksMax": "infinity", "TimeoutAbortUSec": "1min 30s", "TimeoutCleanUSec": "infinity", "TimeoutStartUSec": "1min 30s", "TimeoutStopUSec": "1min 30s", "TimerSlackNSec": "50000", "Transient": "no", "Type": "oneshot", "UID": "[not set]", "UMask": "0022", "UnitFilePreset": "disabled", "UnitFileState": "static", "UtmpMode": "init", "WantedBy": "sysinit.target", "WatchdogSignal": "6", "WatchdogTimestampMonotonic": "0", "WatchdogUSec": "0"}}

TASK [prepare : 增加内核模块开机加载配置] **************************************************
ok: [192.168.110.185] => {"changed": false, "checksum": "3cbd24b09b5e60287df60af1eb6cbabdc0b81fee", "dest": "/etc/modules-load.d/10-k8s-modules.conf", "gid": 0, "group": "root", "mode": "0600", "owner": "root", "path": "/etc/modules-load.d/10-k8s-modules.conf", "size": 60, "state": "file", "uid": 0}

TASK [prepare : 设置系统参数] ********************************************************
ok: [192.168.110.185] => {"changed": false, "checksum": "a006e10edfa6ea747276e487e8a6adb9f10cc2ac", "dest": "/etc/sysctl.d/95-k8s-sysctl.conf", "gid": 0, "group": "root", "mode": "0600", "owner": "root", "path": "/etc/sysctl.d/95-k8s-sysctl.conf", "size": 400, "state": "file", "uid": 0}

TASK [prepare : 生效系统参数] ********************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "sysctl -p /etc/sysctl.d/95-k8s-sysctl.conf", "delta": "0:00:00.043762", "end": "2021-01-22 11:13:53.407076", "rc": 0, "start": "2021-01-22 11:13:53.363314", "stderr": "", "stderr_lines": [], "stdout": "net.ipv4.ip_forward = 1\nnet.bridge.bridge-nf-call-iptables = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.bridge.bridge-nf-call-arptables = 1\nnet.ipv4.tcp_tw_reuse = 0\nnet.core.somaxconn = 32768\nnet.netfilter.nf_conntrack_max = 1000000\nvm.swappiness = 0\nvm.max_map_count = 655360\nfs.file-max = 6553600\nnet.ipv4.tcp_keepalive_time = 600\nnet.ipv4.tcp_keepalive_intvl = 30\nnet.ipv4.tcp_keepalive_probes = 10", "stdout_lines": ["net.ipv4.ip_forward = 1", "net.bridge.bridge-nf-call-iptables = 1", "net.bridge.bridge-nf-call-ip6tables = 1", "net.bridge.bridge-nf-call-arptables = 1", "net.ipv4.tcp_tw_reuse = 0", "net.core.somaxconn = 32768", "net.netfilter.nf_conntrack_max = 1000000", "vm.swappiness = 0", "vm.max_map_count = 655360", "fs.file-max = 6553600", "net.ipv4.tcp_keepalive_time = 600", "net.ipv4.tcp_keepalive_intvl = 30", "net.ipv4.tcp_keepalive_probes = 10"]}

TASK [prepare : 创建 systemd 配置目录] ***********************************************
ok: [192.168.110.185] => {"changed": false, "gid": 0, "group": "root", "mode": "0700", "owner": "root", "path": "/etc/systemd/system.conf.d", "size": 33, "state": "directory", "uid": 0}

TASK [prepare : 设置系统 ulimits] **************************************************
ok: [192.168.110.185] => {"changed": false, "checksum": "44674aba45a2e522ac5d696ee09fe1eb8144162d", "dest": "/etc/systemd/system.conf.d/30-k8s-ulimits.conf", "gid": 0, "group": "root", "mode": "0600", "owner": "root", "path": "/etc/systemd/system.conf.d/30-k8s-ulimits.conf", "size": 87, "state": "file", "uid": 0}

TASK [prepare : 把SCTP列入内核模块黑名单] ************************************************
ok: [192.168.110.185] => {"changed": false, "checksum": "05e7e62dae6e9b835b0ca67baaeb85660b0f69e0", "dest": "/etc/modprobe.d/sctp.conf", "gid": 0, "group": "root", "mode": "0600", "owner": "root", "path": "/etc/modprobe.d/sctp.conf", "size": 49, "state": "file", "uid": 0}

TASK [prepare : prepare some dirs] *********************************************
ok: [192.168.110.185] => (item=/opt/kube/bin) => {"changed": false, "gid": 0, "group": "root", "item": "/opt/kube/bin", "mode": "0700", "owner": "root", "path": "/opt/kube/bin", "size": 4096, "state": "directory", "uid": 0}
changed: [192.168.110.185] => (item=/etc/kubernetes/ssl) => {"changed": true, "gid": 0, "group": "root", "item": "/etc/kubernetes/ssl", "mode": "0700", "owner": "root", "path": "/etc/kubernetes/ssl", "size": 6, "state": "directory", "uid": 0}
ok: [192.168.110.185] => (item=/root/.kube) => {"changed": false, "gid": 0, "group": "root", "item": "/root/.kube", "mode": "0755", "owner": "root", "path": "/root/.kube", "size": 20, "state": "directory", "uid": 0}

TASK [prepare : 分发证书工具 CFSSL] **************************************************
ok: [192.168.110.185] => (item=cfssl) => {"changed": false, "checksum": "459d47a5d6104cb41a57bbb184636d8bafcbdf07", "dest": "/opt/kube/bin/cfssl", "gid": 0, "group": "root", "item": "cfssl", "mode": "0755", "owner": "root", "path": "/opt/kube/bin/cfssl", "size": 16773946, "state": "file", "uid": 0}
ok: [192.168.110.185] => (item=cfssl-certinfo) => {"changed": false, "checksum": "38f24fa1549a36b9489144e4479b9176fb865d2a", "dest": "/opt/kube/bin/cfssl-certinfo", "gid": 0, "group": "root", "item": "cfssl-certinfo", "mode": "0755", "owner": "root", "path": "/opt/kube/bin/cfssl-certinfo", "size": 12691027, "state": "file", "uid": 0}
ok: [192.168.110.185] => (item=cfssljson) => {"changed": false, "checksum": "4f99165d00c1183752155978a592173fdb6e0fdb", "dest": "/opt/kube/bin/cfssljson", "gid": 0, "group": "root", "item": "cfssljson", "mode": "0755", "owner": "root", "path": "/opt/kube/bin/cfssljson", "size": 2784648, "state": "file", "uid": 0}

TASK [prepare : 写入环境变量$PATH] ***************************************************
changed: [192.168.110.185] => {"backup": "", "changed": true, "msg": "line added"}

TASK [prepare : 分发证书相关] ********************************************************
changed: [192.168.110.185] => (item=admin.pem) => {"changed": true, "checksum": "7c5619bdb6083a0a786b22b20b87cc8baca75f1d", "dest": "/etc/kubernetes/ssl/admin.pem", "gid": 0, "group": "root", "item": "admin.pem", "md5sum": "d25d3d13ae5c013f1dab4760fc5c8e34", "mode": "0600", "owner": "root", "size": 1391, "src": "/root/.ansible/tmp/ansible-tmp-1611285242.87-206868018297576/source", "state": "file", "uid": 0}
changed: [192.168.110.185] => (item=admin-key.pem) => {"changed": true, "checksum": "12bdd3e666388afb121ed3cc45d58752bf2f57c9", "dest": "/etc/kubernetes/ssl/admin-key.pem", "gid": 0, "group": "root", "item": "admin-key.pem", "md5sum": "413d563138706492882275281dbe933d", "mode": "0600", "owner": "root", "size": 1679, "src": "/root/.ansible/tmp/ansible-tmp-1611285244.12-173142179255806/source", "state": "file", "uid": 0}
changed: [192.168.110.185] => (item=ca.pem) => {"changed": true, "checksum": "6db44bf0a83f201227793a4cdb817c55b6a02bb3", "dest": "/etc/kubernetes/ssl/ca.pem", "gid": 0, "group": "root", "item": "ca.pem", "md5sum": "adcbaa650f5994a3296372ba0458a175", "mode": "0600", "owner": "root", "size": 1302, "src": "/root/.ansible/tmp/ansible-tmp-1611285245.41-236646102229971/source", "state": "file", "uid": 0}
changed: [192.168.110.185] => (item=ca-key.pem) => {"changed": true, "checksum": "5395c6b23d898ada9c2825dbaf542ab7bb40470f", "dest": "/etc/kubernetes/ssl/ca-key.pem", "gid": 0, "group": "root", "item": "ca-key.pem", "md5sum": "a8b0afc87af1f6b7510743e226868f4f", "mode": "0600", "owner": "root", "size": 1679, "src": "/root/.ansible/tmp/ansible-tmp-1611285246.64-244780488706120/source", "state": "file", "uid": 0}
changed: [192.168.110.185] => (item=ca-config.json) => {"changed": true, "checksum": "24e9422c9c2462295c458129016d10ae6d8b5327", "dest": "/etc/kubernetes/ssl/ca-config.json", "gid": 0, "group": "root", "item": "ca-config.json", "md5sum": "49df98e6482eefad0d0bfa0fad148033", "mode": "0600", "owner": "root", "size": 294, "src": "/root/.ansible/tmp/ansible-tmp-1611285247.91-114307267071341/source", "state": "file", "uid": 0}

TASK [prepare : 添加 kubectl 命令自动补全] *********************************************
changed: [192.168.110.185] => {"backup": "", "changed": true, "msg": "line added"}

TASK [prepare : 分发 kubeconfig配置文件] *********************************************
ok: [192.168.110.185] => {"changed": false, "checksum": "ec3839ff579d03c5ee7a4f56c30db456a227d34f", "dest": "/root/.kube/config", "gid": 0, "group": "root", "mode": "0600", "owner": "root", "path": "/root/.kube/config", "size": 6199, "state": "file", "uid": 0}

TASK [prepare : 分发 kube-proxy.kubeconfig配置文件] **********************************
changed: [192.168.110.185] => {"changed": true, "checksum": "be3417f73f8f1404580e96a948524d4d0c3f3743", "dest": "/etc/kubernetes/kube-proxy.kubeconfig", "gid": 0, "group": "root", "md5sum": "ade35f2428f642a6cf602ecac81cdefb", "mode": "0600", "owner": "root", "size": 6187, "src": "/root/.ansible/tmp/ansible-tmp-1611285252.94-210840100157065/source", "state": "file", "uid": 0}

TASK [prepare : 分发 kube-controller-manager.kubeconfig配置文件] *********************
changed: [192.168.110.185] => {"changed": true, "checksum": "d71aa57c942bd088e4f492f1fb41bef366b8a476", "dest": "/etc/kubernetes/kube-controller-manager.kubeconfig", "gid": 0, "group": "root", "md5sum": "0fe3bccd65d36b09cd41f8b2cc0a0437", "mode": "0600", "owner": "root", "size": 6299, "src": "/root/.ansible/tmp/ansible-tmp-1611285256.0-158182851971305/source", "state": "file", "uid": 0}

TASK [prepare : 分发 kube-scheduler.kubeconfig配置文件] ******************************
changed: [192.168.110.185] => {"changed": true, "checksum": "e298de572ee5098a9e15c55618fabef3d7fbefc9", "dest": "/etc/kubernetes/kube-scheduler.kubeconfig", "gid": 0, "group": "root", "md5sum": "ca9e2eb8921d4a9b8f5e7ce98b3d6b80", "mode": "0600", "owner": "root", "size": 6245, "src": "/root/.ansible/tmp/ansible-tmp-1611285259.1-278073031884913/source", "state": "file", "uid": 0}

PLAY [etcd] ********************************************************************

TASK [etcd : prepare some dirs] ************************************************
ok: [192.168.110.185] => (item=/opt/kube/bin) => {"changed": false, "gid": 0, "group": "root", "item": "/opt/kube/bin", "mode": "0700", "owner": "root", "path": "/opt/kube/bin", "size": 4096, "state": "directory", "uid": 0}
ok: [192.168.110.185] => (item=/etc/kubernetes/ssl) => {"changed": false, "gid": 0, "group": "root", "item": "/etc/kubernetes/ssl", "mode": "0700", "owner": "root", "path": "/etc/kubernetes/ssl", "size": 98, "state": "directory", "uid": 0}
changed: [192.168.110.185] => (item=/etc/etcd/ssl) => {"changed": true, "gid": 0, "group": "root", "item": "/etc/etcd/ssl", "mode": "0700", "owner": "root", "path": "/etc/etcd/ssl", "size": 6, "state": "directory", "uid": 0}
changed: [192.168.110.185] => (item=/var/lib/etcd) => {"changed": true, "gid": 0, "group": "root", "item": "/var/lib/etcd", "mode": "0700", "owner": "root", "path": "/var/lib/etcd", "size": 6, "state": "directory", "uid": 0}
-
TASK [etcd : 下载etcd二进制文件] ******************************************************
-
ok: [
192.168
.110
.185] => (item=etcd) => {
"changed":
false,
"checksum":
"c08d9b7c8079a5bd68d62cca160fa6d60367d41a",
"dest":
"/opt/kube/bin/etcd",
"gid":
0,
"group":
"root",
"item":
"etcd",
"mode":
"0755",
"owner":
"root",
"path":
"/opt/kube/bin/etcd",
"size":
29621657,
"state":
"file",
"uid":
0}
-
ok: [
192.168
.110
.185] => (item=etcdctl) => {
"changed":
false,
"checksum":
"23791923462a6ac3dddd2b7caaf886f5bd84b27f",
"dest":
"/opt/kube/bin/etcdctl",
"gid":
0,
"group":
"root",
"item":
"etcdctl",
"mode":
"0755",
"owner":
"root",
"path":
"/opt/kube/bin/etcdctl",
"size":
22025400,
"state":
"file",
"uid":
0}
-
-
TASK [etcd : 分发证书相关] ***********************************************************
-
ok: [
192.168
.110
.185] => (item=ca.pem) => {
"changed":
false,
"checksum":
"6db44bf0a83f201227793a4cdb817c55b6a02bb3",
"dest":
"/etc/kubernetes/ssl/ca.pem",
"gid":
0,
"group":
"root",
"item":
"ca.pem",
"mode":
"0600",
"owner":
"root",
"path":
"/etc/kubernetes/ssl/ca.pem",
"size":
1302,
"state":
"file",
"uid":
0}
-
ok: [
192.168
.110
.185] => (item=ca-key.pem) => {
"changed":
false,
"checksum":
"5395c6b23d898ada9c2825dbaf542ab7bb40470f",
"dest":
"/etc/kubernetes/ssl/ca-key.pem",
"gid":
0,
"group":
"root",
"item":
"ca-key.pem",
"mode":
"0600",
"owner":
"root",
"path":
"/etc/kubernetes/ssl/ca-key.pem",
"size":
1679,
"state":
"file",
"uid":
0}
-
ok: [
192.168
.110
.185] => (item=ca-config.json) => {
"changed":
false,
"checksum":
"24e9422c9c2462295c458129016d10ae6d8b5327",
"dest":
"/etc/kubernetes/ssl/ca-config.json",
"gid":
0,
"group":
"root",
"item":
"ca-config.json",
"mode":
"0600",
"owner":
"root",
"path":
"/etc/kubernetes/ssl/ca-config.json",
"size":
294,
"state":
"file",
"uid":
0}
-
-
TASK [etcd : 创建etcd证书请求] *******************************************************
-
changed: [
192.168
.110
.185] => {
"changed":
true,
"checksum":
"0ebddc8081a656bae8772cba22e3c60408bbf5af",
"dest":
"/etc/etcd/ssl/etcd-csr.json",
"gid":
0,
"group":
"root",
"md5sum":
"0a68ffd90aac78fadb408a5f6e4eac78",
"mode":
"0600",
"owner":
"root",
"size":
255,
"src":
"/root/.ansible/tmp/ansible-tmp-1611285276.15-277956878039833/source",
"state":
"file",
"uid":
0}
-
-
TASK [etcd : 创建 etcd证书和私钥] *****************************************************
-
changed: [
192.168
.110
.185] => {
"changed":
true,
"cmd":
"cd /etc/etcd/ssl && /opt/kube/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca-config.json -profile=kubernetes etcd-csr.json | /opt/kube/bin/cfssljson -bare etcd",
"delta":
"0:00:01.078552",
"end":
"2021-01-22 11:14:40.820509",
"rc":
0,
"start":
"2021-01-22 11:14:39.741957",
"stderr":
"2021/01/22 11:14:39 [INFO] generate received request\n2021/01/22 11:14:39 [INFO] received CSR\n2021/01/22 11:14:39 [INFO] generating key: rsa-2048\n2021/01/22 11:14:40 [INFO] encoded CSR\n2021/01/22 11:14:40 [INFO] signed certificate with serial number 407606133245147108209565571721859142309194026547",
"stderr_lines": [
"2021/01/22 11:14:39 [INFO] generate received request",
"2021/01/22 11:14:39 [INFO] received CSR",
"2021/01/22 11:14:39 [INFO] generating key: rsa-2048",
"2021/01/22 11:14:40 [INFO] encoded CSR",
"2021/01/22 11:14:40 [INFO] signed certificate with serial number 407606133245147108209565571721859142309194026547"],
"stdout":
"",
"stdout_lines": []}
-
-
TASK [etcd : 创建etcd的systemd unit文件] ********************************************
-
changed: [
192.168
.110
.185] => {
"changed":
true,
"checksum":
"41930c844953ed4bafb6183520e8184ee92d5eb3",
"dest":
"/etc/systemd/system/etcd.service",
"gid":
0,
"group":
"root",
"md5sum":
"87f4cc23badca336a2b48bb26ec339cd",
"mode":
"0600",
"owner":
"root",
"size":
1211,
"src":
"/root/.ansible/tmp/ansible-tmp-1611285281.23-95171800774030/source",
"state":
"file",
"uid":
0}
-
-
TASK [etcd : 开机启用etcd服务] *******************************************************
-
changed: [
192.168
.110
.185] => {
"changed":
true,
"cmd":
"systemctl enable etcd",
"delta":
"0:00:00.441683",
"end":
"2021-01-22 11:14:45.382112",
"rc":
0,
"start":
"2021-01-22 11:14:44.940429",
"stderr":
"Created symlink /etc/systemd/system/multi-user.target.wants/etcd.service → /etc/systemd/system/etcd.service.",
"stderr_lines": [
"Created symlink /etc/systemd/system/multi-user.target.wants/etcd.service → /etc/systemd/system/etcd.service."],
"stdout":
"",
"stdout_lines": []}
-
-
TASK [etcd : 开启etcd服务] *********************************************************
-
changed: [
192.168
.110
.185] => {
"changed":
true,
"cmd":
"systemctl daemon-reload && systemctl restart etcd",
"delta":
"0:00:01.708052",
"end":
"2021-01-22 11:14:47.893753",
"rc":
0,
"start":
"2021-01-22 11:14:46.185701",
"stderr":
"",
"stderr_lines": [],
"stdout":
"",
"stdout_lines": []}
-
-
TASK [etcd : 以轮询的方式等待服务同步完成] ***************************************************
-
changed: [
192.168
.110
.185] => {
"attempts":
1,
"changed":
true,
"cmd":
"systemctl status etcd.service|grep Active",
"delta":
"0:00:00.060122",
"end":
"2021-01-22 11:14:48.776361",
"rc":
0,
"start":
"2021-01-22 11:14:48.716239",
"stderr":
"",
"stderr_lines": [],
"stdout":
" Active: active (running) since Fri 2021-01-22 11:14:47 CST; 880ms ago",
"stdout_lines": [
" Active: active (running) since Fri 2021-01-22 11:14:47 CST; 880ms ago"]}
PLAY [kube-master,kube-node] ***************************************************

TASK [docker : 获取是否已经安装docker] *************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl status docker|grep Active || echo \"NoFound\"", "rc": 0, "stdout": " Active: active (running) since Fri 2021-01-22 11:02:38 CST; 12min ago"}

TASK [docker : 获取是否已经安装containerd] *********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl status containerd|grep Active || echo \"NoFound\"", "rc": 0, "stderr": "Unit containerd.service could not be found.", "stdout": "NoFound"}

TASK [docker : 获取docker版本信息] ***************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/etc/ansible/bin/dockerd --version|cut -d' ' -f3", "rc": 0, "stdout": "19.03.8,"}

TASK [docker : 转换docker版本信息为浮点数] ***********************************************
ok: [192.168.110.185] => {"ansible_facts": {"DOCKER_VER": "19.03"}, "changed": false}

TASK [docker : debug info] *****************************************************
ok: [192.168.110.185] => {"DOCKER_VER": "19.03"}

TASK [docker : 准备docker相关目录] ***************************************************
TASK [docker : 下载 docker 二进制文件] ************************************************
TASK [docker : 下载 docker 二进制文件(>=18.09.x)] *************************************
TASK [docker : docker命令自动补全] ***************************************************
TASK [docker : docker国内镜像加速] ***************************************************
TASK [docker : flush-iptables] *************************************************
TASK [docker : 创建docker的systemd unit文件] ****************************************
TASK [docker : 开机启用docker 服务] **************************************************
TASK [docker : 开启docker 服务] ****************************************************
TASK [docker : 轮询等待docker服务运行] *************************************************
TASK [docker : 配置 docker 命令软链接] ************************************************

TASK [docker : 下载 docker-tag] **************************************************
ok: [192.168.110.185] => {"changed": false, "dest": "/opt/kube/bin/docker-tag", "mode": "0755", "size": 1874}

TASK [containerd : 获取是否已经安装docker] *********************************************
TASK [containerd : fail info1] *************************************************
TASK [containerd : 准备containerd相关目录] *******************************************
TASK [containerd : 加载内核模块 overlay] *********************************************
TASK [containerd : 下载 containerd 二进制文件] ****************************************
TASK [containerd : 创建 containerd 配置文件] *****************************************
TASK [containerd : 创建systemd unit文件] *******************************************
TASK [containerd : 创建 crictl 配置] ***********************************************
TASK [containerd : 开机启用 containerd 服务] *****************************************
TASK [containerd : 开启 containerd 服务] *******************************************
TASK [containerd : 轮询等待containerd服务运行] *****************************************
TASK [containerd : 添加 crictl 命令自动补全] *******************************************
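The docker role detected an already-running Docker 19.03.8 (installed earlier by easzup -D), so the install/start tasks above produced no changes and the containerd tasks were skipped. A quick manual confirmation, if desired:

docker version --format '{{.Server.Version}}'   # should print 19.03.8
systemctl is-active docker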
PLAY [kube-master] *************************************************************

TASK [kube-master : 下载 kube-master 二进制] ****************************************
ok: [192.168.110.185] => (item=kube-apiserver)
ok: [192.168.110.185] => (item=kube-controller-manager)
ok: [192.168.110.185] => (item=kube-scheduler)
ok: [192.168.110.185] => (item=kubectl)

TASK [kube-master : 创建 kubernetes 证书签名请求] **************************************
changed: [192.168.110.185] => {"changed": true, "dest": "/etc/kubernetes/ssl/kubernetes-csr.json", "size": 469}

TASK [kube-master : 创建 kubernetes 证书和私钥] ***************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "cd /etc/kubernetes/ssl && /opt/kube/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca-config.json -profile=kubernetes kubernetes-csr.json | /opt/kube/bin/cfssljson -bare kubernetes", "rc": 0}

TASK [kube-master : 创建 aggregator proxy证书签名请求] *********************************
changed: [192.168.110.185] => {"changed": true, "dest": "/etc/kubernetes/ssl/aggregator-proxy-csr.json", "size": 219}

TASK [kube-master : 创建 aggregator-proxy证书和私钥] **********************************
changed: [192.168.110.185] => {"changed": true, "cmd": "cd /etc/kubernetes/ssl && /opt/kube/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca-config.json -profile=kubernetes aggregator-proxy-csr.json | /opt/kube/bin/cfssljson -bare aggregator-proxy", "rc": 0, "stderr": "[WARNING] This certificate lacks a \"hosts\" field. This makes it unsuitable for websites."}
TASK [kube-master : 生成 basic-auth 随机密码] ****************************************
TASK [kube-master : 设置 basic-auth 随机密码] ****************************************
TASK [kube-master : 创建 basic-auth.csv] *****************************************

TASK [kube-master : 替换 kubeconfig 的 apiserver 地址] ******************************
ok: [192.168.110.185] => (item=/root/.kube/config)
ok: [192.168.110.185] => (item=/etc/kubernetes/kube-controller-manager.kubeconfig)
ok: [192.168.110.185] => (item=/etc/kubernetes/kube-scheduler.kubeconfig)

TASK [kube-master : 创建 master 服务的 systemd unit 文件] *****************************
changed: [192.168.110.185] => (item=kube-apiserver.service)
changed: [192.168.110.185] => (item=kube-controller-manager.service)
changed: [192.168.110.185] => (item=kube-scheduler.service)

TASK [kube-master : enable master 服务] ******************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl enable kube-apiserver kube-controller-manager kube-scheduler", "rc": 0}

TASK [kube-master : 启动 master 服务] **********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl daemon-reload && systemctl restart kube-apiserver && systemctl restart kube-controller-manager && systemctl restart kube-scheduler", "rc": 0}

TASK [kube-master : 以轮询的方式等待master服务启动完成] **************************************
changed: [192.168.110.185] => {"attempts": 1, "changed": true, "cmd": ["/opt/kube/bin/kubectl", "get", "node"], "rc": 0, "stderr": "No resources found in default namespace."}

TASK [kube-master : 配置 admin用户rbac权限] ******************************************
TASK [kube-master : 创建 admin用户rbac权限] ******************************************
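The three control-plane services are now enabled and running. A manual spot check along these lines is optional (run as root on the master; componentstatuses is deprecated but still available on Kubernetes 1.18):

systemctl is-active kube-apiserver kube-controller-manager kube-scheduler
/opt/kube/bin/kubectl get componentstatuses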
TASK [kube-node : 创建kube-node 相关目录] ********************************************
changed: [192.168.110.185] => (item=/var/lib/kubelet)
changed: [192.168.110.185] => (item=/var/lib/kube-proxy)
changed: [192.168.110.185] => (item=/etc/cni/net.d)

TASK [kube-node : 下载 kubelet,kube-proxy 二进制和基础 cni plugins] ********************
ok: [192.168.110.185] => (item=kubectl)
ok: [192.168.110.185] => (item=kubelet)
ok: [192.168.110.185] => (item=kube-proxy)
ok: [192.168.110.185] => (item=bridge)
ok: [192.168.110.185] => (item=host-local)
ok: [192.168.110.185] => (item=loopback)

TASK [kube-node : fail info1] **************************************************
TASK [kube-node : 安装 haproxy] **************************************************
TASK [kube-node : 准备离线安装包目录] ***************************************************
TASK [kube-node : 分发 haproxy_xenial 离线包] ***************************************
TASK [kube-node : 安装 haproxy_xenial 离线包] ***************************************
TASK [kube-node : 分发 haproxy_bionic 离线包] ***************************************
TASK [kube-node : 安装 haproxy_bionic 离线包] ***************************************
TASK [kube-node : 分发 haproxy_centos7 离线包] **************************************
TASK [kube-node : 安装 haproxy_centos7 离线包] **************************************
TASK [kube-node : 分发 haproxy_stretch 离线包] **************************************
TASK [kube-node : 安装 haproxy_stretch 离线包] **************************************
TASK [kube-node : 分发 haproxy_buster 离线包] ***************************************
TASK [kube-node : 安装 haproxy_buster 离线包] ***************************************
TASK [kube-node : 创建haproxy配置目录] ***********************************************
TASK [kube-node : 修改centos的haproxy.service] ************************************
TASK [kube-node : 配置 haproxy] **************************************************
TASK [kube-node : daemon-reload for haproxy.service] ***************************
TASK [kube-node : 开机启用haproxy服务] ***********************************************
TASK [kube-node : 停止haproxy服务] *************************************************
TASK [kube-node : 开启haproxy服务] *************************************************
TASK [kube-node : 替换 kubeconfig 的 apiserver 地址] ********************************
TASK [kube-node : restart kube-node service] ***********************************

TASK [kube-node : 替换 kubeconfig 的 apiserver 地址] ********************************
ok: [192.168.110.185] => {"changed": false, "msg": ""}

TASK [kube-node : 准备kubelet 证书签名请求] ********************************************
changed: [192.168.110.185] => {"changed": true, "dest": "/etc/kubernetes/ssl/kubelet-csr.json", "size": 287}

TASK [kube-node : 创建 kubelet 证书与私钥] ********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "cd /etc/kubernetes/ssl && /opt/kube/bin/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem -ca-key=/etc/kubernetes/ssl/ca-key.pem -config=/etc/kubernetes/ssl/ca-config.json -profile=kubernetes kubelet-csr.json | /opt/kube/bin/cfssljson -bare kubelet", "rc": 0}

TASK [kube-node : 设置集群参数] ******************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl config set-cluster kubernetes --certificate-authority=/etc/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.110.185:6443 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig", "rc": 0, "stdout": "Cluster \"kubernetes\" set."}

TASK [kube-node : 设置客户端认证参数] ***************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl config set-credentials system:node:192.168.110.185 --client-certificate=/etc/kubernetes/ssl/kubelet.pem --embed-certs=true --client-key=/etc/kubernetes/ssl/kubelet-key.pem --kubeconfig=/etc/kubernetes/kubelet.kubeconfig", "rc": 0, "stdout": "User \"system:node:192.168.110.185\" set."}

TASK [kube-node : 设置上下文参数] *****************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl config set-context default --cluster=kubernetes --user=system:node:192.168.110.185 --kubeconfig=/etc/kubernetes/kubelet.kubeconfig", "rc": 0, "stdout": "Context \"default\" created."}

TASK [kube-node : 选择默认上下文] *****************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl config use-context default --kubeconfig=/etc/kubernetes/kubelet.kubeconfig", "rc": 0, "stdout": "Switched to context \"default\"."}
TASK [kube-node : 准备 cni配置文件] **************************************************
changed: [192.168.110.185] => {"changed": true, "dest": "/etc/cni/net.d/10-default.conf", "size": 220}

TASK [kube-node : 注册变量 TMP_VER] ************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/etc/ansible/bin/kube-apiserver --version|cut -d' ' -f2|cut -d'v' -f2", "rc": 0, "stdout": "1.18.6"}

TASK [kube-node : 获取 kubernetes 主版本号] ******************************************
ok: [192.168.110.185] => {"ansible_facts": {"KUBE_VER": "1.18"}, "changed": false}

TASK [kube-node : debug info] **************************************************
ok: [192.168.110.185] => {"KUBE_VER": "1.18"}

TASK [kube-node : 创建kubelet的配置文件] **********************************************
changed: [192.168.110.185] => {"changed": true, "dest": "/var/lib/kubelet/config.yaml", "size": 1594}

TASK [kube-node : 创建kubelet的systemd unit文件] ************************************
changed: [192.168.110.185] => {"changed": true, "dest": "/etc/systemd/system/kubelet.service", "size": 1549}

TASK [kube-node : 开机启用kubelet 服务] **********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl enable kubelet", "rc": 0}

TASK [kube-node : 开启kubelet 服务] ************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl daemon-reload && systemctl restart kubelet", "rc": 0}

TASK [kube-node : 替换 kube-proxy.kubeconfig 的 apiserver 地址] *********************
ok: [192.168.110.185] => {"changed": false, "msg": ""}

TASK [kube-node : 创建kube-proxy 服务文件] *******************************************
changed: [192.168.110.185] => {"changed": true, "dest": "/etc/systemd/system/kube-proxy.service", "size": 687}

TASK [kube-node : 开机启用kube-proxy 服务] *******************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl enable kube-proxy", "rc": 0}

TASK [kube-node : 开启kube-proxy 服务] *********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "systemctl daemon-reload && systemctl restart kube-proxy", "rc": 0}

TASK [kube-node : 轮询等待kubelet启动] ***********************************************
changed: [192.168.110.185] => {"attempts": 1, "changed": true, "cmd": "systemctl status kubelet.service|grep Active", "rc": 0, "stdout": " Active: active (running) since Fri 2021-01-22 11:16:40 CST; 7s ago"}

TASK [kube-node : 轮询等待node达到Ready状态] *******************************************
changed: [192.168.110.185] => {"attempts": 1, "changed": true, "cmd": "/opt/kube/bin/kubectl get node 192.168.110.185|awk 'NR>1{print $2}'", "rc": 0, "stdout": "Ready"}

TASK [kube-node : 设置node节点 role] ***********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl label node 192.168.110.185 kubernetes.io/role=node --overwrite", "rc": 0, "stdout": "node/192.168.110.185 labeled"}

TASK [Making master nodes SchedulingDisabled] **********************************

TASK [Setting master role name] ************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl label node 192.168.110.185 kubernetes.io/role=master --overwrite", "rc": 0, "stdout": "node/192.168.110.185 labeled"}
PLAY [kube-node] ***************************************************************

TASK [kube-node : 创建kube-node 相关目录] ********************************************
TASK [kube-node : 下载 kubelet,kube-proxy 二进制和基础 cni plugins] ********************
TASK [kube-node : fail info1] **************************************************
TASK [kube-node : 安装 haproxy] **************************************************
TASK [kube-node : 准备离线安装包目录] ***************************************************
TASK [kube-node : 分发 haproxy_xenial 离线包] ***************************************
TASK [kube-node : 安装 haproxy_xenial 离线包] ***************************************
TASK [kube-node : 分发 haproxy_bionic 离线包] ***************************************
TASK [kube-node : 安装 haproxy_bionic 离线包] ***************************************
TASK [kube-node : 分发 haproxy_centos7 离线包] **************************************
TASK [kube-node : 安装 haproxy_centos7 离线包] **************************************
TASK [kube-node : 分发 haproxy_stretch 离线包] **************************************
TASK [kube-node : 安装 haproxy_stretch 离线包] **************************************
TASK [kube-node : 分发 haproxy_buster 离线包] ***************************************
TASK [kube-node : 安装 haproxy_buster 离线包] ***************************************
TASK [kube-node : 创建haproxy配置目录] ***********************************************
TASK [kube-node : 修改centos的haproxy.service] ************************************
TASK [kube-node : 配置 haproxy] **************************************************
TASK [kube-node : daemon-reload for haproxy.service] ***************************
TASK [kube-node : 开机启用haproxy服务] ***********************************************
TASK [kube-node : 停止haproxy服务] *************************************************
TASK [kube-node : 开启haproxy服务] *************************************************
TASK [kube-node : 替换 kubeconfig 的 apiserver 地址] ********************************
TASK [kube-node : restart kube-node service] ***********************************
TASK [kube-node : 替换 kubeconfig 的 apiserver 地址] ********************************
TASK [kube-node : 准备kubelet 证书签名请求] ********************************************
TASK [kube-node : 创建 kubelet 证书与私钥] ********************************************
TASK [kube-node : 设置集群参数] ******************************************************
TASK [kube-node : 设置客户端认证参数] ***************************************************
TASK [kube-node : 设置上下文参数] *****************************************************
TASK [kube-node : 选择默认上下文] *****************************************************
TASK [kube-node : 准备 cni配置文件] **************************************************
TASK [kube-node : 注册变量 TMP_VER] ************************************************
TASK [kube-node : 获取 kubernetes 主版本号] ******************************************
TASK [kube-node : debug info] **************************************************
TASK [kube-node : 创建kubelet的配置文件] **********************************************
TASK [kube-node : 创建kubelet的systemd unit文件] ************************************
TASK [kube-node : 开机启用kubelet 服务] **********************************************
TASK [kube-node : 开启kubelet 服务] ************************************************
TASK [kube-node : 替换 kube-proxy.kubeconfig 的 apiserver 地址] *********************
TASK [kube-node : 创建kube-proxy 服务文件] *******************************************
TASK [kube-node : 开机启用kube-proxy 服务] *******************************************
TASK [kube-node : 开启kube-proxy 服务] *********************************************
TASK [kube-node : 轮询等待kubelet启动] ***********************************************
TASK [kube-node : 轮询等待node达到Ready状态] *******************************************
TASK [kube-node : 设置node节点 role] ***********************************************

PLAY [kube-master,kube-node] ***************************************************

TASK [calico : 在节点创建相关目录] ******************************************************
TASK [calico : 创建calico 证书请求] **************************************************
TASK [calico : 创建 calico证书和私钥] *************************************************
TASK [calico : get calico-etcd-secrets info] ***********************************
TASK [calico : 创建 calico-etcd-secrets] *****************************************
TASK [calico : 配置 calico DaemonSet yaml文件] *************************************
TASK [calico : 检查是否已下载离线calico镜像] **********************************************
TASK [calico : 尝试推送离线docker 镜像(若执行失败,可忽略)] *************************************
TASK [calico : 获取calico离线镜像推送情况] ***********************************************
TASK [calico : 导入 calico的离线镜像(若执行失败,可忽略)] **************************************
TASK [calico : 导入 calico的离线镜像(若执行失败,可忽略)] **************************************
TASK [calico : 运行 calico网络] ****************************************************
TASK [calico : 删除默认cni配置] ******************************************************
TASK [calico : 下载calicoctl 客户端] ************************************************
TASK [calico : 准备 calicoctl配置文件] ***********************************************
TASK [calico : 轮询等待calico-node 运行,视下载镜像速度而定] ***********************************
TASK [cilium : 转换内核版本为浮点数] *****************************************************
TASK [cilium : 检查内核版本>4.9] *****************************************************
TASK [cilium : node 节点创建cilium 相关目录] *******************************************
TASK [cilium : 配置 cilium DaemonSet yaml文件] *************************************
TASK [cilium : Optional-Mount BPF FS] ******************************************
TASK [cilium : 检查是否已下载离线cilium镜像] **********************************************
TASK [cilium : 尝试推送离线docker 镜像(若执行失败,可忽略)] *************************************
TASK [cilium : 获取cilium离线镜像推送情况] ***********************************************
TASK [cilium : 导入 cilium的离线镜像(若执行失败,可忽略)] **************************************
TASK [cilium : 导入 cilium的离线镜像(若执行失败,可忽略)] **************************************
TASK [cilium : 运行 cilium网络] ****************************************************
TASK [cilium : 删除默认cni配置] ******************************************************
TASK [cilium : 轮询等待cilium-node 运行,视下载镜像速度而定] ***********************************
TASK [flannel : 创建flannel 相关目录] ************************************************
ok: [192.168.110.185] => (item=/etc/cni/net.d)
ok: [192.168.110.185] => (item=/opt/kube/images)
changed: [192.168.110.185] => (item=/opt/kube/kube-system)

TASK [flannel : 配置 flannel DaemonSet yaml文件] ***********************************
changed: [192.168.110.185] => {"changed": true, "dest": "/opt/kube/kube-system/flannel.yaml", "size": 5004}

TASK [flannel : 下载flannel cni plugins] *****************************************
ok: [192.168.110.185] => (item=bridge)
ok: [192.168.110.185] => (item=flannel)
ok: [192.168.110.185] => (item=host-local)
ok: [192.168.110.185] => (item=loopback)
ok: [192.168.110.185] => (item=portmap)

TASK [flannel : 检查是否已下载离线flannel镜像] ********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": ["ls", "/etc/ansible/down"], "rc": 0, "stdout_lines": ["calico_v3.8.8.tar", "coredns_1.6.7.tar", "dashboard_v2.0.1.tar", "docker", "docker-19.03.8.tgz", "flannel_v0.12.0.tar", "kubeasz_2.2.1.tar", "metrics-scraper_v1.0.4.tar", "metrics-server_v0.3.6.tar", "pause.tar", "pause_3.2.tar"]}

TASK [flannel : 尝试推送离线docker 镜像(若执行失败,可忽略)] ************************************
ok: [192.168.110.185] => (item=pause.tar)
ok: [192.168.110.185] => (item=flannel_v0.12.0.tar)

TASK [flannel : 获取flannel离线镜像推送情况] *********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": ["ls", "/opt/kube/images"], "rc": 0, "stdout_lines": ["coredns_1.6.7.tar", "dashboard_v2.0.1.tar", "flannel_v0.12.0.tar", "metrics-scraper_v1.0.4.tar", "metrics-server_v0.3.6.tar", "pause.tar"]}

TASK [flannel : 导入 flannel的离线镜像(若执行失败,可忽略)] ************************************
changed: [192.168.110.185] => (item=pause.tar) => {"changed": true, "cmd": "/opt/kube/bin/docker load -i /opt/kube/images/pause.tar", "rc": 0, "stdout": "Loaded image: r6w9c7qa.mirror.aliyuncs.com/kubesphere/pause-arm64:3.2"}
changed: [192.168.110.185] => (item=flannel_v0.12.0.tar) => {"changed": true, "cmd": "/opt/kube/bin/docker load -i /opt/kube/images/flannel_v0.12.0.tar", "rc": 0, "stdout": "Loaded image: kubesphere/flannel:v0.12.0"}

TASK [flannel : 导入 flannel的离线镜像(若执行失败,可忽略)] ************************************

TASK [flannel : 运行 flannel网络] **************************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl apply -f /opt/kube/kube-system/flannel.yaml", "rc": 0, "stdout_lines": ["podsecuritypolicy.policy/psp.flannel.unprivileged created", "clusterrole.rbac.authorization.k8s.io/flannel created", "clusterrolebinding.rbac.authorization.k8s.io/flannel created", "serviceaccount/flannel created", "configmap/kube-flannel-cfg created", "daemonset.apps/kube-flannel-ds-arm64 created"]}

TASK [flannel : 删除默认cni配置] *****************************************************
changed: [192.168.110.185] => {"changed": true, "path": "/etc/cni/net.d/10-default.conf", "state": "absent"}

TASK [flannel : 轮询等待flannel 运行,视下载镜像速度而定] **************************************
FAILED - RETRYING: 轮询等待flannel 运行,视下载镜像速度而定 (15 retries left).
changed: [192.168.110.185] => {"attempts": 2, "changed": true, "cmd": "/opt/kube/bin/kubectl get pod -n kube-system -o wide|grep 'flannel'|grep ' 192.168.110.185 '|awk '{print $3}'", "rc": 0, "stdout": "Running"}
TASK [kube-router : 创建cni 和kube-router 相关目录] ***********************************
TASK [kube-router : 准备配置 kube-router DaemonSet (without IPVS)] *****************
TASK [kube-router : 下载cni plugins] *********************************************
TASK [kube-router : 检查是否已下载离线kube-router镜像] ************************************
TASK [kube-router : 尝试推送离线docker 镜像(若执行失败,可忽略)] ********************************
TASK [kube-router : 获取kube-router离线镜像推送情况] *************************************
TASK [kube-router : 导入 kube-router的离线镜像(若执行失败,可忽略)] ****************************
TASK [kube-router : 导入 kube-router的离线镜像(若执行失败,可忽略)] ****************************
TASK [kube-router : 运行 kube-router DaemonSet] **********************************
TASK [kube-router : 删除默认cni配置] *************************************************
TASK [kube-router : 轮询等待kube-router 运行,视下载镜像速度而定] ******************************
TASK [kube-ovn : 创建相关目录] *******************************************************
TASK [kube-ovn : 配置 crd.yaml 文件] ***********************************************
TASK [kube-ovn : 配置 kube-ovn.yaml 文件] ******************************************
TASK [kube-ovn : 配置 ovn.yaml 文件] ***********************************************
TASK [kube-ovn : 配置 kubectl plugin] ********************************************
TASK [kube-ovn : 检查是否已下载离线kube_ovn镜像] ******************************************
TASK [kube-ovn : 尝试推送离线docker 镜像(若执行失败,可忽略)] ***********************************
TASK [kube-ovn : 获取kube_ovn离线镜像推送情况] *******************************************
TASK [kube-ovn : 导入 kube_ovn的离线镜像(若执行失败,可忽略)] **********************************
TASK [kube-ovn : 导入 kube_ovn的离线镜像(若执行失败,可忽略)] **********************************
TASK [kube-ovn : 运行 kube-ovn网络] ************************************************
TASK [kube-ovn : 删除默认cni配置] ****************************************************
TASK [kube-ovn : 轮询等待kube-ovn 运行,视下载镜像速度而定] ************************************

PLAY [kube-node] ***************************************************************
TASK [cluster-addon : 在 node 节点创建相关目录] *****************************************
ok: [192.168.110.185] => (item=/opt/kube/kube-system)

TASK [cluster-addon : 准备 DNS的部署文件] *********************************************
changed: [192.168.110.185] => (item=kubedns) => {"changed": true, "dest": "/opt/kube/kube-system/kubedns.yaml", "size": 5481}
changed: [192.168.110.185] => (item=coredns) => {"changed": true, "dest": "/opt/kube/kube-system/coredns.yaml", "size": 4072}

TASK [cluster-addon : 获取所有已经创建的POD信息] ******************************************
changed: [192.168.110.185] => {"changed": true, "cmd": ["/opt/kube/bin/kubectl", "get", "pod", "--all-namespaces"], "rc": 0, "stdout_lines": ["NAMESPACE     NAME                          READY   STATUS    RESTARTS   AGE", "kube-system   kube-flannel-ds-arm64-ws4mf   1/1     Running   0          22s"]}

TASK [cluster-addon : 获取已下载离线镜像信息] *********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": ["ls", "/etc/ansible/down"], "rc": 0, "stdout_lines": ["calico_v3.8.8.tar", "coredns_1.6.7.tar", "dashboard_v2.0.1.tar", "docker", "docker-19.03.8.tgz", "flannel_v0.12.0.tar", "kubeasz_2.2.1.tar", "metrics-scraper_v1.0.4.tar", "metrics-server_v0.3.6.tar", "pause.tar", "pause_3.2.tar"]}

TASK [cluster-addon : 尝试推送离线coredns镜像(若执行失败,可忽略)] ******************************
ok: [192.168.110.185] => {"changed": false, "dest": "/opt/kube/images/coredns_1.6.7.tar", "size": 41638400}

TASK [cluster-addon : 获取coredns离线镜像推送情况] ***************************************
changed: [192.168.110.185] => {"changed": true, "cmd": ["ls", "/opt/kube/images"], "rc": 0, "stdout_lines": ["coredns_1.6.7.tar", "dashboard_v2.0.1.tar", "flannel_v0.12.0.tar", "metrics-scraper_v1.0.4.tar", "metrics-server_v0.3.6.tar", "pause.tar"]}

TASK [cluster-addon : 导入coredns的离线镜像(若执行失败,可忽略)] *******************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/docker load -i /opt/kube/images/coredns_1.6.7.tar", "rc": 0, "stdout": "Loaded image: coredns/coredns:1.6.7"}

TASK [cluster-addon : 导入coredns的离线镜像(若执行失败,可忽略)] *******************************

TASK [cluster-addon : 创建coredns部署] *********************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/kubectl apply -f /opt/kube/kube-system/coredns.yaml", "rc": 0, "stdout_lines": ["serviceaccount/coredns created", "clusterrole.rbac.authorization.k8s.io/system:coredns created", "clusterrolebinding.rbac.authorization.k8s.io/system:coredns created", "configmap/coredns created", "deployment.apps/coredns created", "service/kube-dns created"]}
-
-
TASK [cluster-addon : 尝试推送离线 metrics-server镜像(若执行失败,可忽略)] **********************
ok: [192.168.110.185] => {"changed": false, "checksum": "44499ac0d1db54332283e87afa1a320783641853", "dest": "/opt/kube/images/metrics-server_v0.3.6.tar", "gid": 0, "group": "root", "mode": "0600", "owner": "root", "path": "/opt/kube/images/metrics-server_v0.3.6.tar", "size": 39563264, "state": "file", "uid": 0}

TASK [cluster-addon : 获取metrics-server离线镜像推送情况] ********************************
changed: [192.168.110.185] => {"changed": true, "cmd": ["ls", "/opt/kube/images"], "delta": "0:00:00.044178", "end": "2021-01-22 11:18:08.011182", "rc": 0, "start": "2021-01-22 11:18:07.967004", "stderr": "", "stderr_lines": [], "stdout": "coredns_1.6.7.tar\ndashboard_v2.0.1.tar\nflannel_v0.12.0.tar\nmetrics-scraper_v1.0.4.tar\nmetrics-server_v0.3.6.tar\npause.tar", "stdout_lines": ["coredns_1.6.7.tar", "dashboard_v2.0.1.tar", "flannel_v0.12.0.tar", "metrics-scraper_v1.0.4.tar", "metrics-server_v0.3.6.tar", "pause.tar"]}

TASK [cluster-addon : 导入 metrics-server的离线镜像(若执行失败,可忽略)] ***********************
changed: [192.168.110.185] => {"changed": true, "cmd": "/opt/kube/bin/docker load -i /opt/kube/images/metrics-server_v0.3.6.tar", "delta": "0:00:00.352552", "end": "2021-01-22 11:18:09.285986", "rc": 0, "start": "2021-01-22 11:18:08.933434", "stderr": "", "stderr_lines": [], "stdout": "Loaded image: mirrorgooglecontainers/metrics-server-arm64:v0.3.6", "stdout_lines": ["Loaded image: mirrorgooglecontainers/metrics-server-arm64:v0.3.6"]}

TASK [cluster-addon : 导入 metrics-server的离线镜像(若执行失败,可忽略)] ***********************

TASK [cluster-addon : 创建 metrics-server部署] *************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl apply -f /etc/ansible/manifests/metrics-server", "delta": "0:00:01.446657", "end": "2021-01-22 03:18:11.331587", "rc": 0, "start": "2021-01-22 03:18:09.884930", "stderr": "", "stderr_lines": [], "stdout": "clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created\nclusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created\nrolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created\napiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created\nserviceaccount/metrics-server created\ndeployment.apps/metrics-server created\nservice/metrics-server created\nclusterrole.rbac.authorization.k8s.io/system:metrics-server created\nclusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created", "stdout_lines": ["clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created", "clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created", "rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created", "apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created", "serviceaccount/metrics-server created", "deployment.apps/metrics-server created", "service/metrics-server created", "clusterrole.rbac.authorization.k8s.io/system:metrics-server created", "clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created"]}
TASK [cluster-addon : 尝试推送离线 dashboard 镜像(若执行失败,可忽略)] **************************
ok: [192.168.110.185] => (item=dashboard_v2.0.1.tar) => {"changed": false, "checksum": "b6e29dcac0f79b3cf2b2981fb09631852d4dec70", "dest": "/opt/kube/images/dashboard_v2.0.1.tar", "gid": 0, "group": "root", "item": "dashboard_v2.0.1.tar", "mode": "0600", "owner": "root", "path": "/opt/kube/images/dashboard_v2.0.1.tar", "size": 223211008, "state": "file", "uid": 0}
ok: [192.168.110.185] => (item=metrics-scraper_v1.0.4.tar) => {"changed": false, "checksum": "f48fc392d604bdcb70ea2d54aacf7c230210f1c6", "dest": "/opt/kube/images/metrics-scraper_v1.0.4.tar", "gid": 0, "group": "root", "item": "metrics-scraper_v1.0.4.tar", "mode": "0600", "owner": "root", "path": "/opt/kube/images/metrics-scraper_v1.0.4.tar", "size": 35186176, "state": "file", "uid": 0}

TASK [cluster-addon : 获取dashboard离线镜像推送情况] *************************************
changed: [192.168.110.185] => {"changed": true, "cmd": ["ls", "/opt/kube/images"], "delta": "0:00:01.042300", "end": "2021-01-22 11:18:20.513088", "rc": 0, "start": "2021-01-22 11:18:19.470788", "stderr": "", "stderr_lines": [], "stdout": "coredns_1.6.7.tar\ndashboard_v2.0.1.tar\nflannel_v0.12.0.tar\nmetrics-scraper_v1.0.4.tar\nmetrics-server_v0.3.6.tar\npause.tar", "stdout_lines": ["coredns_1.6.7.tar", "dashboard_v2.0.1.tar", "flannel_v0.12.0.tar", "metrics-scraper_v1.0.4.tar", "metrics-server_v0.3.6.tar", "pause.tar"]}

TASK [cluster-addon : 导入 dashboard 的离线镜像(docker)] ******************************
changed: [192.168.110.185] => (item=dashboard_v2.0.1.tar) => {"changed": true, "cmd": "/opt/kube/bin/docker load -i /opt/kube/images/dashboard_v2.0.1.tar", "delta": "0:00:00.736832", "end": "2021-01-22 11:18:22.140812", "item": "dashboard_v2.0.1.tar", "rc": 0, "start": "2021-01-22 11:18:21.403980", "stderr": "", "stderr_lines": [], "stdout": "Loaded image: kubernetesui/dashboard-arm64:v2.0.1", "stdout_lines": ["Loaded image: kubernetesui/dashboard-arm64:v2.0.1"]}
changed: [192.168.110.185] => (item=metrics-scraper_v1.0.4.tar) => {"changed": true, "cmd": "/opt/kube/bin/docker load -i /opt/kube/images/metrics-scraper_v1.0.4.tar", "delta": "0:00:00.245267", "end": "2021-01-22 11:18:22.959681", "item": "metrics-scraper_v1.0.4.tar", "rc": 0, "start": "2021-01-22 11:18:22.714414", "stderr": "", "stderr_lines": [], "stdout": "Loaded image: kubernetesui/metrics-scraper-arm64:v1.0.4", "stdout_lines": ["Loaded image: kubernetesui/metrics-scraper-arm64:v1.0.4"]}

TASK [cluster-addon : 导入 dashboard 的离线镜像(containerd)] **************************
TASK [cluster-addon : 创建 dashboard部署] ******************************************
changed: [192.168.110.185] => {"changed": true, "cmd": "/etc/ansible/bin/kubectl apply -f /etc/ansible/manifests/dashboard", "delta": "0:00:02.213289", "end": "2021-01-22 03:18:25.800979", "rc": 0, "start": "2021-01-22 03:18:23.587690", "stderr": "Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply", "stderr_lines": ["Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply"], "stdout": "serviceaccount/admin-user created\nclusterrolebinding.rbac.authorization.k8s.io/admin-user created\nnamespace/kube-system configured\nserviceaccount/kubernetes-dashboard created\nservice/kubernetes-dashboard created\nsecret/kubernetes-dashboard-certs created\nsecret/kubernetes-dashboard-csrf created\nsecret/kubernetes-dashboard-key-holder created\nconfigmap/kubernetes-dashboard-settings created\nrole.rbac.authorization.k8s.io/kubernetes-dashboard created\nclusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created\nrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created\nclusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created\ndeployment.apps/kubernetes-dashboard created\nservice/dashboard-metrics-scraper created\ndeployment.apps/dashboard-metrics-scraper created\nserviceaccount/dashboard-read-user created\nclusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created\nclusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created", "stdout_lines": ["serviceaccount/admin-user created", "clusterrolebinding.rbac.authorization.k8s.io/admin-user created", "namespace/kube-system configured", "serviceaccount/kubernetes-dashboard created", "service/kubernetes-dashboard created", "secret/kubernetes-dashboard-certs created", "secret/kubernetes-dashboard-csrf created", "secret/kubernetes-dashboard-key-holder created", "configmap/kubernetes-dashboard-settings created", "role.rbac.authorization.k8s.io/kubernetes-dashboard created", "clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created", "rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created", "clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created", "deployment.apps/kubernetes-dashboard created", "service/dashboard-metrics-scraper created", "deployment.apps/dashboard-metrics-scraper created", "serviceaccount/dashboard-read-user created", "clusterrolebinding.rbac.authorization.k8s.io/dashboard-read-binding created", "clusterrole.rbac.authorization.k8s.io/dashboard-read-clusterrole created"]}
PLAY RECAP *********************************************************************
192.168.110.185            : ok=98   changed=68   unreachable=0   failed=0
localhost                  : ok=34   changed=30   unreachable=0   failed=0

[INFO] save context: aio
[INFO] save aio roles' configration
[INFO] save aio ansible hosts
[INFO] save aio kubeconfig
[INFO] save aio kube-proxy.kubeconfig
[INFO] save aio certs
[INFO] Action successed : start-aio
Check the deployment status
Open a new terminal (so the new environment variables are loaded and kubectl and the other commands can be found).
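If you would rather not open a new terminal, note that the deployment places its binaries under /opt/kube/bin (as the playbook output above shows), so putting that directory on PATH in the current shell should also work. A minimal sketch, assuming the default kubeasz install path:

# assumption: kubeasz installed kubectl and the other binaries to /opt/kube/bin
export PATH=$PATH:/opt/kube/bin
kubectl version --short   # quick check that kubectl now resolves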
List all pods
[yeqiang@192-168-110-185 kubeasz-arm64]$ sudo su
[sudo] yeqiang 的密码:
[root@192-168-110-185 kubeasz-arm64]# kubectl get pods -A
NAMESPACE     NAME                                         READY   STATUS    RESTARTS   AGE
kube-system   coredns-65dbdb44db-nphfw                     1/1     Running   0          2m38s
kube-system   dashboard-metrics-scraper-795b67bcfd-dtbkr   1/1     Running   0          2m16s
kube-system   kube-flannel-ds-arm64-ws4mf                  1/1     Running   0          3m9s
kube-system   kubernetes-dashboard-7cf666b9-wsv5s          1/1     Running   0          2m16s
kube-system   metrics-server-854d95976b-smbrs              1/1     Running   0          2m29s
[root@192-168-110-185 kubeasz-arm64]#
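Instead of re-running kubectl get pods until everything is Running, kubectl wait can block until the pods report Ready. A small sketch (the 300s timeout is an arbitrary choice, not from the original article):

# wait for every pod in kube-system to become Ready, or give up after 5 minutes
kubectl wait --for=condition=Ready pod --all -n kube-system --timeout=300s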
View the kubernetes-dashboard service port
[root@192-168-110-185 kubeasz-arm64]# kubectl get svc -n kube-system | grep kubernetes-dashboard
kubernetes-dashboard        NodePort    10.68.49.30   <none>        443:23499/TCP   4m25s
Open the address in the local Firefox browser:
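The original post does not show the URL itself, but from the NodePort mapping above (443:23499/TCP) and the node address 192.168.110.185, the dashboard should presumably be reachable at https://192.168.110.185:23499. A quick reachability check from the shell; the URL is an assumption derived from the output above, and -k skips certificate verification because the dashboard serves a self-signed certificate:

# presumed dashboard URL, derived from the NodePort shown above
curl -k https://192.168.110.185:23499/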
Get the login token
[root@192-168-110-185 桌面]# kubectl get secret -n kube-system | grep admin-user | awk '{print $1}' | xargs -i kubectl describe secret -n kube-system {} | grep token: | awk '{print $2}'
eyJhbGciOiJSUzI1NiIsImtpZCI6InZFLTdsRUU5UWxkcW93WWpHUlplbzR6eGYwV0E0WGxlWWJKVm5XakpzZ2sifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlLXN5c3RlbSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLXgyc21wIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyODBhYjVkMi00MjgxGTRmMmItODNmZC1kMWRkNzY4Mjk4M2MiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZS1zeXN0ZW06YWRtaW4tdXNlciJ9.Qn2xSZiWo3HOpJ-1_cNw7Bw4X4zWFP2Wg7DukqzdgjyZD0PTnwtOpCA9fbwg93xBlPNGXQN-zDXRgMVGoxcAAn3ueOQUXMunDiVba2wdDX68DjX4UH3Q2dhiWJJW0yt1JN5eleFd4EzQB4d_tR1xFKJ0W2M-3k8ZWHcxH7tcbcXyslyLdALJ8f4NDMP5mBWfXgZtiAHBKlGV9GKVeVn4B4R4JII3O40J80u5fW6-Ki7z0pf7OtkGobsD6XDap0piBiYfGIRjBYqmH93vzSjKmB29I8aivhueJ2atVF33lbs7Y699QyN2PBzPK9TEF9yvb5uCaIjruYg40b3JQU-zzg
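An alternative way to obtain the same token, without grep-ing the describe output, is to read the secret's token field with jsonpath and decode it. A sketch, assuming the admin-user ServiceAccount created by the dashboard manifests above:

# find the admin-user token secret and print the decoded bearer token
SECRET_NAME=$(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
kubectl -n kube-system get secret "$SECRET_NAME" -o jsonpath='{.data.token}' | base64 -d && echo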
Summary
About the firewall:
The current firewalld version has some compatibility issues with k8s, so for simplicity the firewall is removed outright and the machine rebooted (an alternative that keeps the package installed is sketched further below):
sudo yum remove firewalld -y
reboot
After the uninstall, kubernetes-dashboard can be accessed normally.
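If removing the package outright is undesirable, stopping and disabling firewalld may be enough; this alternative was not tested in the original article and is sketched here only for reference:

# keep the firewalld package but stop it now and prevent it from starting on boot
sudo systemctl stop firewalld
sudo systemctl disable firewalld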
Thanks to Docker's isolation and packaging, deploying and running k8s on Kylin Advanced Server OS V10 (银河麒麟高级服务器操作系统V10) only requires adapting some of the images and building the corresponding arm64 binaries.
Reposted from: https://blog.csdn.net/m0_46573967/article/details/112935319