搭建Kubesphere容器管理平台

1、nfs持久化存储

# Install the NFS server (run on k8s-master)
yum install -y nfs-utils
mkdir /nfs/data/ -p
# Export /nfs/data to all clients, read-write, no root squashing
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
# Re-export the share list after editing /etc/exports
exportfs -r
# Verify the export configuration took effect
exportfs

# Configure the NFS client (run on worker nodes)
# List the shares exported by the NFS server
showmount -e 192.168.100.44
mkdir -p /nfs/data
# Mount the server's export at the same local path
mount -t nfs 192.168.100.44:/nfs/data /nfs/data

# 配置默认存储

cat sc.yaml
---
# StorageClass backed by the nfs-subdir-external-provisioner; the
# annotation marks it as the cluster's default storage class.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"  # archive (keep a copy of) PV contents when the PV is deleted
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #    limits:
          #      cpu: 10m
          #    requests:
          #      cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.100.44  # address of your NFS server
            - name: NFS_PATH
              value: /nfs/data  # directory exported by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.100.44
            path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io

kubectl apply -f sc.yaml
kubectl get sc

# Deploy metrics-server (provides resource metrics for `kubectl top`)
cat mes.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
    rbac.authorization.k8s.io/aggregate-to-admin: "true"
    rbac.authorization.k8s.io/aggregate-to-edit: "true"
    rbac.authorization.k8s.io/aggregate-to-view: "true"
  name: system:aggregated-metrics-reader
rules:
  - apiGroups:
      - metrics.k8s.io
    resources:
      - pods
      - nodes
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - nodes
      - nodes/stats
      - namespaces
      - configmaps
    verbs:
      - get
      - list
      - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server-auth-reader
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: extension-apiserver-authentication-reader
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server:system:auth-delegator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:auth-delegator
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    k8s-app: metrics-server
  name: system:metrics-server
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:metrics-server
subjects:
  - kind: ServiceAccount
    name: metrics-server
    namespace: kube-system
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  ports:
    - name: https
      port: 443
      protocol: TCP
      targetPort: https
  selector:
    k8s-app: metrics-server
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    k8s-app: metrics-server
  name: metrics-server
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: metrics-server
  strategy:
    rollingUpdate:
      maxUnavailable: 0
  template:
    metadata:
      labels:
        k8s-app: metrics-server
    spec:
      containers:
        - args:
            - --cert-dir=/tmp
            # skip kubelet TLS verification (kubelet serving certs are
            # usually self-signed in kubeadm clusters)
            - --kubelet-insecure-tls
            - --secure-port=4443
            - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
            - --kubelet-use-node-status-port
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/metrics-server:v0.4.3
          imagePullPolicy: IfNotPresent
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /livez
              port: https
              scheme: HTTPS
            periodSeconds: 10
          name: metrics-server
          ports:
            - containerPort: 4443
              name: https
              protocol: TCP
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /readyz
              port: https
              scheme: HTTPS
            periodSeconds: 10
          securityContext:
            readOnlyRootFilesystem: true
            runAsNonRoot: true
            runAsUser: 1000
          volumeMounts:
            - mountPath: /tmp
              name: tmp-dir
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      serviceAccountName: metrics-server
      volumes:
        - emptyDir: {}
          name: tmp-dir
---
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  labels:
    k8s-app: metrics-server
  name: v1beta1.metrics.k8s.io
spec:
  group: metrics.k8s.io
  groupPriorityMinimum: 100
  insecureSkipTLSVerify: true
  service:
    name: metrics-server
    namespace: kube-system
  version: v1beta1
  versionPriority: 100

kubectl apply -f mes.yaml

监测各个node的cpu和内存的使用情况

kubectl top nodes


2、kubesphere安装

1)、下载安装文件

wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/kubesphere-installer.yaml

wget https://github.com/kubesphere/ks-installer/releases/download/v3.3.0/cluster-configuration.yaml

2)、修改cluster-configuration

将etcd下的 endpointIps改为你的master节点的私有IP地址

3)、安装kubesphere

kubectl apply -f kubesphere-installer.yaml

kubectl apply -f cluster-configuration.yaml

4)、查看安装进度日志

kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l 'app in (ks-install, ks-installer)' -o jsonpath='{.items[0].metadata.name}') -f

5)、查看运行状态

kubectl get pod --all-namespaces

kubectl get svc/ks-console -n kubesphere-system

6)、进入可视化界面

访问192.168.100.44:30880 端口进入可视化界面
默认账号 : admin
默认密码 : P@88w0rd


7)、镜像问题排查

#查看pod信息

kubectl get pod -A

#复制有问题pod的名称空间和Pod名称 描述这个pod,查看事件

kubectl describe pod -n kubesphere-system redis-8f84d9fdd-xjf6w

#ks-apiserver拉取镜像失败

docker pull kubesphere/ks-apiserver:v3.3.0


解决prometheus报错,etcd监控证书找不到问题:

kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs --from-file=etcd-client-ca.crt=/etc/kubernetes/pki/etcd/ca.crt --from-file=etcd-client.crt=/etc/kubernetes/pki/apiserver-etcd-client.crt --from-file=etcd-client.key=/etc/kubernetes/pki/apiserver-etcd-client.key

发表评论
留言与评论(共有 0 条评论) “”
   
验证码:

相关文章

推荐文章