This wiki describes how to set up a Kubernetes cluster with kubeadm, and then deploy SDN-C within that Kubernetes cluster.
...
Code Block

# If you installed the coredns addon
sudo kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                                 READY   STATUS    RESTARTS   AGE   IP              NODE
kube-system   coredns-65dcdb4cf-8dr7w              0/1     Pending   0          10m   <none>          <none>
kube-system   coredns-65dcdb4cf-8ez2s              0/1     Pending   0          10m   <none>          <none>
kube-system   etcd-k8s-master                      1/1     Running   0          9m    10.147.99.149   k8s-master
kube-system   kube-apiserver-k8s-master            1/1     Running   0          9m    10.147.99.149   k8s-master
kube-system   kube-controller-manager-k8s-master   1/1     Running   0          9m    10.147.99.149   k8s-master
kube-system   kube-proxy-jztl4                     1/1     Running   0          10m   10.147.99.149   k8s-master
kube-system   kube-scheduler-k8s-master            1/1     Running   0          9m    10.147.99.149   k8s-master
# (There will be 2 coredns pods with Kubernetes version 1.10.1 and higher)

# If you did not install the coredns addon, a kube-dns pod will be created instead
sudo kubectl get pods --all-namespaces -o wide
NAME                                    READY   STATUS    RESTARTS   AGE   IP              NODE
etcd-k8s-s1-master                      1/1     Running   0          23d   10.147.99.131   k8s-s1-master
kube-apiserver-k8s-s1-master            1/1     Running   0          23d   10.147.99.131   k8s-s1-master
kube-controller-manager-k8s-s1-master   1/1     Running   0          23d   10.147.99.131   k8s-s1-master
kube-dns-6f4fd4bdf-czn68                3/3     Pending   0          23d   <none>          <none>
kube-proxy-ljt2h                        1/1     Running   0          23d   10.147.99.148   k8s-s1-node0
kube-scheduler-k8s-s1-master            1/1     Running   0          23d   10.147.99.131   k8s-s1-master

# (Optional) run the following commands if you are curious.
sudo kubectl get node
sudo kubectl get secret
sudo kubectl config view
sudo kubectl config current-context
sudo kubectl get componentstatus
sudo kubectl get clusterrolebinding --all-namespaces
sudo kubectl get serviceaccounts --all-namespaces
sudo kubectl get pods --all-namespaces -o wide
sudo kubectl get services --all-namespaces -o wide
sudo kubectl cluster-info
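The coredns (or kube-dns) pods above show Pending because no pod-network add-on has been installed yet; they stay that way until a CNI plugin is applied. A minimal sketch, assuming Weave Net is the add-on you chose (any supported CNI plugin serves the same purpose):

Code Block

# install a pod-network add-on (Weave Net shown here as one example)
sudo kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"

# the DNS pods should move from Pending to Running once the network is up
sudo kubectl get pods --all-namespaces -o wide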
...
Code Block

# decrease sdnc pods to 1
$ kubectl scale statefulset sdnc -n onap-sdnc --replicas=1
statefulset "sdnc" scaled

# verify: the 2 extra sdnc pods will terminate
$ kubectl get pods --all-namespaces -a | grep sdnc
onap-sdnc   nfs-provisioner-5fb9fcb48f-cj8hm   1/1   Running       0   21h
onap-sdnc   sdnc-0                             2/2   Running       0   2h
onap-sdnc   sdnc-1                             0/2   Terminating   0   40m
onap-sdnc   sdnc-2                             0/2   Terminating   0   15m

# increase sdnc pods to 5
$ kubectl scale statefulset sdnc -n onap-sdnc --replicas=5
statefulset "sdnc" scaled

# increase db pods to 5
$ kubectl scale statefulset sdnc-dbhost -n onap-sdnc --replicas=5
statefulset "sdnc-dbhost" scaled

$ kubectl get pods --all-namespaces -o wide | grep onap-sdnc
onap-sdnc   nfs-provisioner-7fd7b4c6b7-d6k5t   1/1   Running   0   13h   10.42.0.149     sdnc-k8s
onap-sdnc   sdnc-0                             2/2   Running   0   13h   10.42.134.186   sdnc-k8s
onap-sdnc   sdnc-1                             2/2   Running   0   13h   10.42.186.72    sdnc-k8s
onap-sdnc   sdnc-2                             2/2   Running   0   13h   10.42.51.86     sdnc-k8s
onap-sdnc   sdnc-dbhost-0                      2/2   Running   0   13h   10.42.190.88    sdnc-k8s
onap-sdnc   sdnc-dbhost-1                      2/2   Running   0   12h   10.42.213.221   sdnc-k8s
onap-sdnc   sdnc-dbhost-2                      2/2   Running   0   5m    10.42.63.197    sdnc-k8s
onap-sdnc   sdnc-dbhost-3                      2/2   Running   0   5m    10.42.199.38    sdnc-k8s
onap-sdnc   sdnc-dbhost-4                      2/2   Running   0   4m    10.42.148.85    sdnc-k8s
onap-sdnc   sdnc-dgbuilder-6ff8d94857-hl92x    1/1   Running   0   13h   10.42.255.132   sdnc-k8s
onap-sdnc   sdnc-portal-0                      1/1   Running   0   13h   10.42.141.70    sdnc-k8s
onap-sdnc   sdnc-portal-1                      1/1   Running   0   13h   10.42.60.71     sdnc-k8s
onap-sdnc   sdnc-portal-2
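If you want to follow a scale operation as it happens rather than re-running the grep, kubectl can watch the namespace and report the StatefulSets' replica counts directly. A small sketch (the -w flag streams updates until interrupted):

Code Block

# watch pods in the onap-sdnc namespace as they are created/terminated (Ctrl-C to stop)
kubectl get pods -n onap-sdnc -w

# confirm desired vs. ready replica counts on the StatefulSets
kubectl get statefulset sdnc sdnc-dbhost -n onap-sdnc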
# sync this host's clock once against the lab NTP server (substitute your own NTP server address)
sudo ntpdate -s 10.247.5.11
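A one-shot ntpdate will drift again over time; if the sync should repeat automatically, a cron entry is one common approach. A minimal sketch, assuming an hourly sync against the same server is acceptable for your lab:

Code Block

# open the root crontab for editing
sudo crontab -e

# add a line like this to re-sync the clock at the top of every hour
0 * * * * /usr/sbin/ntpdate -s 10.247.5.11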