This wiki describes how to set up a Kubernetes cluster with kubeadm, and then deploy SDN-C within that Kubernetes cluster.

...

Code Block
languagebash
# (Optional) fix vi bug in some versions of MobaXterm (changes the first letter of an edited file to "g" after opening)
vi ~/.vimrc  ==> repeat for root/ubuntu and any other user who will edit files.
# Add the following 2 lines.
syntax on
set background=dark



# Add the hostnames of the kubernetes nodes (master and workers) to /etc/hosts
sudo vi /etc/hosts
# <IP address> <hostname>
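# Example entries (hypothetical addresses/hostnames - replace with your own):
# 10.147.99.149 k8s-master
# 10.147.99.148 k8s-node1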

# Turn off firewall and allow all incoming HTTP connections through IPTABLES
sudo ufw disable
sudo iptables -I INPUT -j ACCEPT

# Fix the server timezone (the command below will prompt you to select yours).
sudo dpkg-reconfigure tzdata


# (Optional) create a bash history file as the ubuntu user so that it does not accidentally get created as the root user.  
touch ~/.bash_history  

# (Optional) turn on ssh password authentication and give the ubuntu user a password if you do not like using ssh keys. 
# Set "PasswordAuthentication yes" in the /etc/ssh/sshd_config file and then set the ubuntu password
sudo vi /etc/ssh/sshd_config;sudo systemctl restart sshd;sudo passwd ubuntu;

# Update the VM with the latest core packages  
sudo apt clean
sudo apt update
sudo apt -y full-upgrade
sudo reboot

# Set up ntp on your image if needed.  It is important that all the VMs' clocks are in sync, or it will cause problems joining kubernetes nodes to the kubernetes cluster
sudo apt install ntp
sudo apt install ntpdate 

# It is recommended to add your local ntp hostname or your ntp server's IP address to ntp.conf 
# Sync your vm clock with that of your ntp server. The best choice for the ntp server is a solid machine that is not one of the Kubernetes VMs. Make sure you can ping it!
# A service restart is needed for the time to sync up. You can run the commands below for an immediate change.


sudo vi /etc/ntp.conf
# Append the following lines to /etc/ntp.conf, to make them permanent.
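# e.g. a server line (assumption - replace 10.247.5.11, the example server used elsewhere in this guide, with your own):
# server 10.247.5.11 iburst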

date 
sudo service ntp stop
sudo ntpdate -s <ntp-hostname | ntp server's IP address>  ==>e.g.: sudo ntpdate -s 10.247.5.11
sudo service ntp start
date
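
# (Optional) verify ntp peer status after the restart (assumption: ntpq is installed along with the ntp package):
ntpq -p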


# Some of the clustering scripts (switch_voting.sh and sdnc_cluster.sh) require JSON parsing, so install jq on the masters only
sudo apt install -y jq
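
# (Optional) quick sanity check that jq can parse JSON (an illustrative one-liner, not from the original guide):
echo '{"status":"ok"}' | jq -r .status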

Question: Did you check the date on all K8S nodes to make sure they are in sync?
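
One quick way to check (a sketch; the hostnames are hypothetical and it assumes ssh access from the master to each node):

Code Block
languagebash
for h in k8s-master k8s-node1 k8s-node2; do echo -n "$h: "; ssh ubuntu@$h date; done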

...

Code Block
languagebash
# The "sudo -i" changes user to root.
sudo -i
apt-get update && apt-get install -y apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -

# Add a kubernetes repository for the latest stable release for the Ubuntu flavour on the machine (here: xenial)
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb http://apt.kubernetes.io/ kubernetes-xenial main
EOF

apt-get update

# As of today (late April 2018) version 1.10.1 of the kubernetes packages is available.
# To install that specific version, run:
apt-get install -y kubelet=1.10.1-00
apt-get install -y kubectl=1.10.1-00
apt-get install -y kubeadm

# To install the latest version of the Kubernetes packages (recommended):
apt-get install -y kubelet kubeadm kubectl

# To install an older version of the kubernetes packages, use the lines below.
# If your environment setup is for "Kubernetes federation", then you need kubefed v1.10.1. We recommend that all Kubernetes packages be of the same version.
apt-get install -y kubelet=1.8.6-00 kubernetes-cni=0.5.1-00
apt-get install -y kubectl=1.8.6-00
apt-get install -y kubeadm



# Verify version 
kubectl version
kubeadm version
kubelet --version

exit
# Append the following lines to ~/.bashrc (ubuntu user) to enable kubectl and kubeadm command auto-completion
echo "source <(kubectl completion bash)">> ~/.bashrc
echo "source <(kubeadm completion bash)">> ~/.bashrc

...

Code Block
languagebash
# On the k8s-master vm, set up the kubernetes master node.  
# The "sudo -i" changes user to root.
sudo -i

# Verify that no kubernetes app is running yet.
ps -ef | grep -i kube | grep -v grep

# Pick one DNS add-on: either "kube-dns" or "CoreDNS".  If your environment setup is for "Kubernetes federation" or "SDN-C Geographic Redundancy", then use the "CoreDNS" addon.
# Note that kubeadm version 1.8.x does not have support for coredns feature gate. 
# Upgrade kubeadm to latest version before running below command:

# With "CoreDNS" addon (recommended)
kubeadm init --feature-gates=CoreDNS=true | tee ~/kubeadm_init.log 

# with kube-dns addon
kubeadm init | tee ~/kubeadm_init.log

# Verify that the kubernetes apps are running (kubelet, kube-scheduler, etcd, kube-apiserver, kube-proxy, kube-controller-manager)
ps -ef | grep -i kube | grep -v grep

# The "exit" reverts user back to ubuntu.
exit

...

Code Block
languagebash
# If you installed coredns addon
sudo kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                                 READY     STATUS    RESTARTS   AGE       IP              NODE
kube-system   coredns-65dcdb4cf-8dr7w              0/1       Pending   0          10m       <none>          <none>
kube-system   coredns-65dcdb4cf-8ez2s              0/1       Pending   0          10m       <none>          <none>
kube-system   etcd-k8s-master                      1/1       Running   0          9m        10.147.99.149   k8s-master
kube-system   kube-apiserver-k8s-master            1/1       Running   0          9m        10.147.99.149   k8s-master
kube-system   kube-controller-manager-k8s-master   1/1       Running   0          9m        10.147.99.149   k8s-master
kube-system   kube-proxy-jztl4                     1/1       Running   0          10m       10.147.99.149   k8s-master
kube-system   kube-scheduler-k8s-master            1/1       Running   0          9m        10.147.99.149   k8s-master

#(There will be 2 coredns pods with kubernetes version 1.10.1 and higher)


# If you did not install the coredns addon, a kube-dns pod will be created instead
sudo kubectl get pods --all-namespaces -o wide
NAME                                    READY     STATUS    RESTARTS   AGE       IP              NODE
etcd-k8s-s1-master                      1/1       Running   0          23d       10.147.99.131   k8s-s1-master
kube-apiserver-k8s-s1-master            1/1       Running   0          23d       10.147.99.131   k8s-s1-master
kube-controller-manager-k8s-s1-master   1/1       Running   0          23d       10.147.99.131   k8s-s1-master
kube-dns-6f4fd4bdf-czn68                3/3       Pending   0          23d        <none>          <none>    
kube-proxy-ljt2h                        1/1       Running   0          23d       10.147.99.148   k8s-s1-node0
kube-scheduler-k8s-s1-master            1/1       Running   0          23d       10.147.99.131   k8s-s1-master


# (Optional) run the following commands if you are curious.
sudo kubectl get node
sudo kubectl get secret
sudo kubectl config view
sudo kubectl config current-context
sudo kubectl get componentstatus
sudo kubectl get clusterrolebinding --all-namespaces
sudo kubectl get serviceaccounts --all-namespaces
sudo kubectl get pods --all-namespaces -o wide
sudo kubectl get services --all-namespaces -o wide
sudo kubectl cluster-info

...

Code Block
languagebash
sudo kubectl get pods --all-namespaces -o wide
NAMESPACE     NAME                                    READY     STATUS    RESTARTS   AGE       IP               NODE
kube-system   etcd-k8s-master                      1/1       Running   0          1m        10.147.112.140   k8s-master
kube-system   kube-apiserver-k8s-master            1/1       Running   0          1m        10.147.112.140   k8s-master
kube-system   kube-controller-manager-k8s-master   1/1       Running   0          1m        10.147.112.140   k8s-master
kube-system   kube-dns-545bc4bfd4-jcklm            3/3       Running   0          44m       10.32.0.2        k8s-master
kube-system   kube-proxy-lnv7r                     1/1       Running   0          44m       10.147.112.140   k8s-master
kube-system   kube-scheduler-k8s-master            1/1       Running   0          1m        10.147.112.140   k8s-master
kube-system   weave-net-b2hkh                      2/2       Running   0          1m        10.147.112.140   k8s-master


#(There will be 2 coredns pods with different IP addresses, with kubernetes version 1.10.1)

# Verify that the AVAILABLE flag for the "kube-dns" or "coredns" deployment changes to 1. (2 with kubernetes version 1.10.1)
#For coredns
sudo kubectl get deployment --all-namespaces
NAMESPACE     NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kube-system   coredns    2         2         2            2           2m

#For kubedns
sudo kubectl get deployment --all-namespaces
NAMESPACE     NAME       DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kube-system   kube-dns   1         1         1            1           1h

Troubleshooting tip: 

  • If any of the weave pods runs into a problem and gets stuck in the "ImagePullBackOff" state, you can try running "sudo kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"" again. 
  • Sometimes you need to delete the problematic pod so that it terminates and starts fresh. Use "kubectl delete po/<pod-name> -n <name-space>" to delete a pod.
  • To "unjoin" a worker node, run "kubectl delete node <node-name>" (go through the "Undeploy SDNC" process at the end if you have an SDNC cluster running).
  • If for any reason you need to re-create the kubernetes cluster, first remove /etc/kubernetes/, /var/lib/etcd and /etc/systemd/system/kubelet.service.d/, then run the kubeadm init command again, as shown in the sketch below.
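
A minimal sketch of that re-create sequence (an assumption of the exact steps; re-run kubeadm init with whatever flags you used originally):

Code Block
languagebash
# Remove state left over from the previous cluster, then re-initialize (run as root).
sudo -i
rm -rf /etc/kubernetes/ /var/lib/etcd /etc/systemd/system/kubelet.service.d/
kubeadm init | tee ~/kubeadm_init.log
exit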


Install Helm and Tiller on the Kubernetes Master Node (k8s-master)

...

Install helm (client side). The following instructions were taken from https://docs.helm.sh/using_helm/#installing-helm:

Note: You may need to install an older version of helm; if so, follow the "Downgrade helm" section (scroll down). 

Code Block
languagebash
# As a root user, download helm and install it
curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get > get_helm.sh
chmod 700 get_helm.sh
./get_helm.sh

...

Code Block
languagebash
helm init --service-account tiller --upgrade


# A new pod is created, but will be in pending status.
kubectl get pods --all-namespaces -o wide  | grep tiller
kube-system   tiller-deploy-b6bf9f4cc-vbrc5           0/1       Pending   0          7m        <none>           <none>


# A new service is created 
kubectl get services --all-namespaces -o wide | grep tiller
kube-system   tiller-deploy   ClusterIP   10.102.74.236   <none>        44134/TCP       47m       app=helm,name=tiller

# A new deployment is created, but the AVAILABLE flag is set to "0".

kubectl get deployments --all-namespaces
NAMESPACE     NAME            DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
kube-system   kube-dns        1         1         1            1           1h
kube-system   tiller-deploy   1         1         1            0           8m

If you need to reset Helm, follow the below steps:

Code Block
languagebash
# Uninstalls Tiller from a cluster
helm reset --force

# Clean up any existing artifacts
kubectl -n kube-system delete deployment tiller-deploy
kubectl -n kube-system delete serviceaccount tiller
kubectl -n kube-system delete ClusterRoleBinding tiller-clusterrolebinding

kubectl create -f tiller-serviceaccount.yaml

# init helm
helm init --service-account tiller --upgrade
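
For reference, a sketch of what tiller-serviceaccount.yaml typically contains (an assumption - the exact file used with this guide may differ; the resource names match the delete commands above):

Code Block
languagebash
# Write a ServiceAccount for tiller plus a ClusterRoleBinding granting it cluster-admin.
cat <<EOF > tiller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: tiller-clusterrolebinding
subjects:
- kind: ServiceAccount
  name: tiller
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
EOF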


Configure the Kubernetes Worker Nodes (k8s-node<n>)

Setting up cluster nodes is very easy. Just refer back to the "kubeadm init" output logs (/root/kubeadm_init.log). The last line of the logs contains a "kubeadm join" command with token information and other parameters.
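
The join command has the following shape (a sketch; the endpoint, token and hash are placeholders - use the exact line from your own kubeadm_init.log):

Code Block
languagebash
kubeadm join 10.147.99.149:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>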

...

See 3. Share the /dockerdata-nfs Folder between Kubernetes Nodes for instruction on how to set this up.


Configuring SDN-C ONAP


Clone OOM project only on the Kubernetes Master Node

Note
titleHelm support

OOM deployment is now being done using helm.

It is highly recommended to stop here and continue from this page: Deploying SDN-C using helm chart


As the ubuntu user, clone the oom repository. 

...


Get the following 2 gerrit changes from Configure SDN-C Cluster Deployment.

...

Verify SDNC Clustering

Refer to Validate the SDN-C ODL cluster.

Undeploy SDNC

Code Block
languagebash
$ cd ~/oom/kubernetes/oneclick/
$ source setenv.bash
$ ./deleteAll.bash -n onap
$ ./deleteAll.bash -n onap -a sdnc
$ sudo rm -rf /dockerdata-nfs

...

Code Block
languagebash
# decrease sdnc pods to 1
$ kubectl scale statefulset  sdnc -n onap-sdnc --replicas=1
statefulset "sdnc" scaled


# verify: 2 sdnc pods will terminate
$ kubectl get pods --all-namespaces -a | grep sdnc
onap-sdnc    nfs-provisioner-5fb9fcb48f-cj8hm      1/1       Running       0          21h
onap-sdnc    sdnc-0                                2/2       Running       0          2h
onap-sdnc    sdnc-1                                0/2       Terminating   0         40m
onap-sdnc    sdnc-2                                0/2       Terminating   0         15m


# increase sdnc pods to 5
$ kubectl scale statefulset  sdnc -n onap-sdnc --replicas=5
statefulset "sdnc" scaled

# increase db pods to 5
$ kubectl scale statefulset sdnc-dbhost -n onap-sdnc --replicas=5
statefulset "sdnc-dbhost" scaled

$ kubectl get pods --all-namespaces -o wide | grep onap-sdnc
onap-sdnc     nfs-provisioner-7fd7b4c6b7-d6k5t        1/1       Running   0          13h       10.42.0.149     sdnc-k8s
onap-sdnc     sdnc-0                                  2/2       Running   0          13h       10.42.134.186   sdnc-k8s
onap-sdnc     sdnc-1                                  2/2       Running   0          13h       10.42.186.72    sdnc-k8s
onap-sdnc     sdnc-2                                  2/2       Running   0          13h       10.42.51.86     sdnc-k8s
onap-sdnc     sdnc-dbhost-0                           2/2       Running   0          13h       10.42.190.88    sdnc-k8s
onap-sdnc     sdnc-dbhost-1                           2/2       Running   0          12h       10.42.213.221   sdnc-k8s
onap-sdnc     sdnc-dbhost-2                           2/2       Running   0          5m        10.42.63.197    sdnc-k8s
onap-sdnc     sdnc-dbhost-3                           2/2       Running   0          5m        10.42.199.38    sdnc-k8s
onap-sdnc     sdnc-dbhost-4                           2/2       Running   0          4m        10.42.148.85    sdnc-k8s
onap-sdnc     sdnc-dgbuilder-6ff8d94857-hl92x         1/1       Running   0          13h       10.42.255.132   sdnc-k8s
onap-sdnc     sdnc-portal-0                           1/1       Running   0          13h       10.42.141.70    sdnc-k8s
onap-sdnc     sdnc-portal-1                           1/1       Running   0          13h       10.42.60.71     sdnc-k8s
onap-sdnc     sdnc-portal-2   


