
This page details the Rancher RI installation independent of the deployment target (OpenStack, AWS, Azure, GCE, bare-metal, VMware).

Prerequisites

The supported versions are as follows:

ONAP Release    Rancher    Kubernetes    Helm      Kubectl    Docker
Amsterdam       1.6.10     1.7.7         2.3.0     1.7.7      1.12.x
Beijing         1.6.14     1.8.6         2.6.1+    1.8.6      17.03-ce
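
A quick way to verify what is currently installed on a host (a minimal sketch; assumes the binaries are on the PATH and, for the Rancher check, that the server container was named rancher_server as in the script below):

docker --version
kubectl version --client
helm version --client
# the Rancher server version is visible in the GUI footer, or from the running container image tag
docker ps --filter name=rancher_server --format '{{.Image}}'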

Rancher 1.6 Installation

The following applies to the amsterdam and master branches.

see https://gerrit.onap.org/r/#/c/32019 

usage() {
cat <<EOF
Usage: $0 [PARAMs]
-u                  : Display usage
-b [branch]         : branch = master or amsterdam (required)
-s [server]         : server = IP or DNS name (required)
EOF
}

install_onap() {
  if [ "$BRANCH" == "amsterdam" ]; then
    RANCHER_VERSION=1.6.10
    KUBECTL_VERSION=1.7.7
    HELM_VERSION=2.3.0
    DOCKER_VERSION=1.12
  else
    RANCHER_VERSION=1.6.14
    KUBECTL_VERSION=1.8.6
    HELM_VERSION=2.6.1
    DOCKER_VERSION=17.03
  fi

  echo "Installing on ${SERVER} for ${BRANCH}: Rancher: ${RANCHER_VERSION} Kubectl: ${KUBECTL_VERSION} Helm: ${HELM_VERSION} Docker: ${DOCKER_VERSION}"
  curl https://releases.rancher.com/install-docker/$DOCKER_VERSION.sh | sh
  # when running as non-root (ubuntu) run the following and logout/log back in
  sudo usermod -aG docker ubuntu
  docker run -d --restart=unless-stopped -p 8880:8080 --name rancher_server rancher/server:v$RANCHER_VERSION
  curl -LO https://storage.googleapis.com/kubernetes-release/release/v$KUBECTL_VERSION/bin/linux/amd64/kubectl
  chmod +x ./kubectl
  sudo mv ./kubectl /usr/local/bin/kubectl
  mkdir -p ~/.kube
  wget http://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz
  tar -zxvf helm-v${HELM_VERSION}-linux-amd64.tar.gz
  sudo mv linux-amd64/helm /usr/local/bin/helm

  # create kubernetes environment on rancher using cli
  RANCHER_CLI_VER=0.6.7
  KUBE_ENV_NAME=onap
  wget https://releases.rancher.com/cli/v${RANCHER_CLI_VER}/rancher-linux-amd64-v${RANCHER_CLI_VER}.tar.gz
  tar -zxvf rancher-linux-amd64-v${RANCHER_CLI_VER}.tar.gz
  cp rancher-v${RANCHER_CLI_VER}/rancher .
  chmod +x ./rancher
  sudo apt-get install -y jq
  APIRESPONSE=`curl -s 'http://127.0.0.1:8880/v2-beta/apikey' -d '{"type":"apikey","accountId":"1a1","name":"autoinstall","description":"autoinstall","created":null,"kind":null,"removeTime":null,"removed":null,"uuid":null}'`
  # Extract and store token
  echo "APIRESPONSE: $APIRESPONSE"
  KEY_PUBLIC=`echo $APIRESPONSE | jq -r .publicValue`
  KEY_SECRET=`echo $APIRESPONSE | jq -r .secretValue`
  echo "publicValue: $KEY_PUBLIC secretValue: $KEY_SECRET"
  export RANCHER_URL=http://${SERVER}:8880
  export RANCHER_ACCESS_KEY=$KEY_PUBLIC
  export RANCHER_SECRET_KEY=$KEY_SECRET
  ./rancher env ls
  echo "Creating kubernetes environment named ${KUBE_ENV_NAME}"
  ./rancher env create -t kubernetes $KUBE_ENV_NAME > kube_env_id.json
  ./rancher env ls

  # pending create of client host
  # see https://wiki.onap.org/display/DW/ONAP+on+Kubernetes#ONAPonKubernetes-Registeryourhost
  # pending token and paste to
  #vi ~/.kube/config
  echo "run the following after attaching the host if you installed a higher kubectl version than the server"
  echo "helm init --upgrade"
}
BRANCH=
SERVER=
while getopts ":b:s:u:" PARAM; do
  case $PARAM in
    u)
      usage
      exit 1
      ;;
    b)
      BRANCH=${OPTARG}
      ;;
    s)
      SERVER=${OPTARG}
      ;;
    ?)
      usage
      exit
      ;;
    esac
done

if [[ -z $BRANCH ]] || [[ -z $SERVER ]]; then
  usage
  exit 1
fi
install_onap $BRANCH $SERVER
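
Example invocation (a sketch - the filename is illustrative, so use whatever you saved the script above as, and substitute your own server IP or DNS name):

chmod +x oom_rancher_setup.sh
./oom_rancher_setup.sh -b master -s dev.onap.info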


The following will be automated shortly

Run the above script on a clean Ubuntu 16.04 VM (you may need to set your hostname in /etc/hosts)

Log in to the Rancher GUI on port 8880

Select the existing onap Kubernetes environment

Add your host - run the docker agent registration command pasted from the GUI

Wait for the Kubernetes CLI to provide a token, then copy/paste it into ~/.kube/config
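
Once the token is in ~/.kube/config, a quick sanity check of the new environment (a minimal sketch):

# verify the cluster is reachable and the host registered
kubectl cluster-info
kubectl get nodes
# the kube-system pods (including tiller) should come up within a few minutes
kubectl get pods --all-namespaces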

Experimental Installation

Rancher 2.0

see https://gerrit.onap.org/r/#/c/32037/1/install/rancher/oom_rancher2_setup.sh

./oom_rancher2_setup.sh -s amsterdam.onap.info


Run the above script on a clean Ubuntu 16.04 VM (you may need to set your hostname in /etc/hosts)

The cluster will be created and registered for you.

Log in on port 80 and wait for the cluster to turn green - then hit the kubectl button and copy/paste the contents into ~/.kube/config

Result

root@ip-172-31-84-230:~# docker ps
CONTAINER ID        IMAGE                                                                                                                   COMMAND                  CREATED             STATUS              PORTS                                      NAMES
66e823e8ebb8        gcr.io/google_containers/defaultbackend@sha256:865b0c35e6da393b8e80b7e3799f777572399a4cff047eb02a81fa6e7a48ed4b         "/server"                3 minutes ago       Up 3 minutes                                                   k8s_default-http-backend_default-http-backend-66b447d9cf-t4qxx_ingress-nginx_54afe3f8-1455-11e8-b142-169c5ae1104e_0
7c9a6eeeb557        rancher/k8s-dns-sidecar-amd64@sha256:4581bf85bd1acf6120256bb5923ec209c0a8cfb0cbe68e2c2397b30a30f3d98c                   "/sidecar --v=2 --..."   3 minutes ago       Up 3 minutes                                                   k8s_sidecar_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0
72487327e65b        rancher/pause-amd64:3.0                                                                                                 "/pause"                 3 minutes ago       Up 3 minutes                                                   k8s_POD_default-http-backend-66b447d9cf-t4qxx_ingress-nginx_54afe3f8-1455-11e8-b142-169c5ae1104e_0
d824193e7404        rancher/k8s-dns-dnsmasq-nanny-amd64@sha256:bd1764fed413eea950842c951f266fae84723c0894d402a3c86f56cc89124b1d             "/dnsmasq-nanny -v..."   3 minutes ago       Up 3 minutes                                                   k8s_dnsmasq_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0
89bdd61a99a3        rancher/k8s-dns-kube-dns-amd64@sha256:9c7906c0222ad6541d24a18a0faf3b920ddf66136f45acd2788e1a2612e62331                  "/kube-dns --domai..."   3 minutes ago       Up 3 minutes                                                   k8s_kubedns_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0
7c17fc57aef9        rancher/cluster-proportional-autoscaler-amd64@sha256:77d2544c9dfcdfcf23fa2fcf4351b43bf3a124c54f2da1f7d611ac54669e3336   "/cluster-proporti..."   3 minutes ago       Up 3 minutes                                                   k8s_autoscaler_kube-dns-autoscaler-54fd4c549b-6bm5b_kube-system_51afa75f-1455-11e8-b142-169c5ae1104e_0
024269154b8b        rancher/pause-amd64:3.0                                                                                                 "/pause"                 3 minutes ago       Up 3 minutes                                                   k8s_POD_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0
48e039d15a90        rancher/pause-amd64:3.0                                                                                                 "/pause"                 3 minutes ago       Up 3 minutes                                                   k8s_POD_kube-dns-autoscaler-54fd4c549b-6bm5b_kube-system_51afa75f-1455-11e8-b142-169c5ae1104e_0
13bec6fda756        rancher/pause-amd64:3.0                                                                                                 "/pause"                 3 minutes ago       Up 3 minutes                                                   k8s_POD_nginx-ingress-controller-vchhb_ingress-nginx_54aede27-1455-11e8-b142-169c5ae1104e_0
332073b160c9        rancher/coreos-flannel-cni@sha256:3cf93562b936004cbe13ed7d22d1b13a273ac2b5092f87264eb77ac9c009e47f                      "/install-cni.sh"        3 minutes ago       Up 3 minutes                                                   k8s_install-cni_kube-flannel-jgx9x_kube-system_4fb9b39b-1455-11e8-b142-169c5ae1104e_0
79ef0da922c5        rancher/coreos-flannel@sha256:93952a105b4576e8f09ab8c4e00483131b862c24180b0b7d342fb360bbe44f3d                          "/opt/bin/flanneld..."   3 minutes ago       Up 3 minutes                                                   k8s_kube-flannel_kube-flannel-jgx9x_kube-system_4fb9b39b-1455-11e8-b142-169c5ae1104e_0
300eab7db4bc        rancher/pause-amd64:3.0                                                                                                 "/pause"                 3 minutes ago       Up 3 minutes                                                   k8s_POD_kube-flannel-jgx9x_kube-system_4fb9b39b-1455-11e8-b142-169c5ae1104e_0
1597f8ba9087        rancher/k8s:v1.8.7-rancher1-1                                                                                           "/opt/rke/entrypoi..."   3 minutes ago       Up 3 minutes                                                   kube-proxy
523034c75c0e        rancher/k8s:v1.8.7-rancher1-1                                                                                           "/opt/rke/entrypoi..."   4 minutes ago       Up 4 minutes                                                   kubelet
788d572d313e        rancher/k8s:v1.8.7-rancher1-1                                                                                           "/opt/rke/entrypoi..."   4 minutes ago       Up 4 minutes                                                   scheduler
9e520f4e5b01        rancher/k8s:v1.8.7-rancher1-1                                                                                           "/opt/rke/entrypoi..."   4 minutes ago       Up 4 minutes                                                   kube-controller
29bdb59c9164        rancher/k8s:v1.8.7-rancher1-1                                                                                           "/opt/rke/entrypoi..."   4 minutes ago       Up 4 minutes                                                   kube-api
2686cc1c904a        rancher/coreos-etcd:v3.0.17                                                                                             "/usr/local/bin/et..."   4 minutes ago       Up 4 minutes                                                   etcd
a1fccc20c8e7        rancher/agent:v2.0.2                                                                                                    "run.sh --etcd --c..."   5 minutes ago       Up 5 minutes                                                   unruffled_pike
6b01cf361a52        rancher/server:preview                                                                                                  "rancher --k8s-mod..."   5 minutes ago       Up 5 minutes        0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp   rancher-server


Kubernetes DevOps

From the original ONAP on Kubernetes page


Kubernetes-specific config

https://kubernetes.io/docs/user-guide/kubectl-cheatsheet/

Deleting All Containers

Delete all the containers (and services)

./deleteAll.bash -n onap -y
# in amsterdam only
./deleteAll.bash -n onap
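
To confirm the teardown finished before redeploying, check that no onap pods remain (a sketch; -a includes terminated pods on the kubectl versions above):

kubectl get pods --all-namespaces -a | grep onap
kubectl get namespaces | grep onap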

Delete/Rerun config-init container for /dockerdata-nfs refresh

refer to the procedure as part of https://github.com/obrienlabs/onap-root/blob/master/cd.sh

Delete the config-init container and its generated /dockerdata-nfs share

There may be cases where new configuration content needs to be deployed after a pull of a new version of ONAP.

for example, after a pull brings in files like the following (20170902):

root@ip-172-31-93-160:~/oom/kubernetes/oneclick# git pull
Resolving deltas: 100% (135/135), completed with 24 local objects.
From http://gerrit.onap.org/r/oom
   bf928c5..da59ee4  master     -> origin/master
Updating bf928c5..da59ee4
 kubernetes/config/docker/init/src/config/aai/aai-config/cookbooks/aai-resources/aai-resources-auth/metadata.rb                                  |    7 +
 kubernetes/config/docker/init/src/config/aai/aai-config/cookbooks/aai-resources/aai-resources-auth/recipes/aai-resources-aai-keystore.rb        |    8 +
 kubernetes/config/docker/init/src/config/aai/aai-config/cookbooks/{ajsc-aai-config => aai-resources/aai-resources-config}/CHANGELOG.md          |    2 +-
 kubernetes/config/docker/init/src/config/aai/aai-config/cookbooks/{ajsc-aai-config => aai-resources/aai-resources-config}/README.md             |    4 +-



see OOM-257 - DevOps: OOM config reset procedure for new /dockerdata-nfs content (CLOSED; worked with Zoran)

# check for the pod
kubectl get pods --all-namespaces -a
# delete all the pods/services
# master
./deleteAll.bash -n onap -y
# amsterdam
./deleteAll.bash -n onap
# delete the fs
rm -rf /dockerdata-nfs/onap
# at this point the environment is empty
# pull the repo
git pull
# rerun the config
cd ../config
./createConfig.sh -n onap
# if you get an error saying release onap-config already exists, run: helm del --purge onap-config
 
 
Example from 20170907:
root@kube0:~/oom/kubernetes/oneclick# rm -rf /dockerdata-nfs/
root@kube0:~/oom/kubernetes/oneclick# cd ../config/
root@kube0:~/oom/kubernetes/config# ./createConfig.sh -n onap
**** Creating configuration for ONAP instance: onap
Error from server (AlreadyExists): namespaces "onap" already exists
Error: a release named "onap-config" already exists.
Please run: helm ls --all "onap-config"; helm del --help
**** Done ****
root@kube0:~/oom/kubernetes/config# helm del --purge onap-config
release "onap-config" deleted
# rerun createAll.bash -n onap

Container Endpoint access

Check the services view in the Kubernetes API under robot:

robot.onap-robot:88 TCP

robot.onap-robot:30209 TCP

kubectl get services --all-namespaces -o wide

onap-vid      vid-mariadb            None           <none>        3306/TCP         1h        app=vid-mariadb

onap-vid      vid-server             10.43.14.244   <nodes>       8080:30200/TCP   1h        app=vid-server
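
When scripting against these endpoints, the NodePort can be extracted directly instead of parsing the table above (a sketch using jsonpath against the vid-server service from the example):

kubectl get service vid-server -n onap-vid -o jsonpath='{.spec.ports[0].nodePort}'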


Container Logs

kubectl --namespace onap-vid logs -f vid-server-248645937-8tt6p

16-Jul-2017 02:46:48.707 INFO [main] org.apache.catalina.startup.Catalina.start Server startup in 22520 ms

kubectl --namespace onap-portal logs portalapps-2799319019-22mzl -f

root@obriensystemskub0:~/oom/kubernetes/oneclick# kubectl get pods --all-namespaces -o wide

NAMESPACE     NAME                                    READY     STATUS    RESTARTS   AGE       IP              NODE

onap-robot    robot-44708506-dgv8j                    1/1       Running   0          36m       10.42.240.80    obriensystemskub0

root@obriensystemskub0:~/oom/kubernetes/oneclick# kubectl --namespace onap-robot logs -f robot-44708506-dgv8j

2017-07-16 01:55:54: (log.c.164) server started


A pod may be set up to log to a volume that can be inspected outside of the container. If you cannot connect to the container, you can inspect the backing volume instead. For a pod using an emptyDir volume, the log files can be found on the kubernetes node hosting the pod. More details can be found here: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir

Here is an example of finding SDNC logs on a VM hosting a kubernetes node.

# find the sdnc pod name and which kubernetes node it's running on
kubectl -n onap-sdnc get all -o wide
# describe the pod to see the emptyDir volume names and the pod uid
kubectl -n onap-sdnc describe po/sdnc-5b5b7bf89c-97qkx
# ssh to the VM hosting the kubernetes node if you are not already on the vm
ssh root@vm-host
# search the /var/lib/kubelet/pods/ directory for the log file
sudo find /var/lib/kubelet/pods/ | grep sdnc-logs
# the result is a path of the format /var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~empty-dir/<volume-name>
 
/var/lib/kubelet/pods/d6041229-d614-11e7-9516-fa163e6ff8e8/volumes/kubernetes.io~empty-dir/sdnc-logs
/var/lib/kubelet/pods/d6041229-d614-11e7-9516-fa163e6ff8e8/volumes/kubernetes.io~empty-dir/sdnc-logs/sdnc
/var/lib/kubelet/pods/d6041229-d614-11e7-9516-fa163e6ff8e8/volumes/kubernetes.io~empty-dir/sdnc-logs/sdnc/karaf.log
/var/lib/kubelet/pods/d6041229-d614-11e7-9516-fa163e6ff8e8/plugins/kubernetes.io~empty-dir/sdnc-logs
/var/lib/kubelet/pods/d6041229-d614-11e7-9516-fa163e6ff8e8/plugins/kubernetes.io~empty-dir/sdnc-logs/ready
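
The pod uid used in those paths can also be queried directly, which skips the describe step (a sketch using the example sdnc pod above):

kubectl -n onap-sdnc get pod sdnc-5b5b7bf89c-97qkx -o jsonpath='{.metadata.uid}'
# the logs then live under /var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~empty-dir/<volume-name>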


Robot Logs

Yogini and I needed the robot logs in OOM Kubernetes - they were already exposed over HTTP, protected by robot:robot basic auth:

http://<your_dns_name>:30209/logs/demo/InitDistribution/report.html

for example after a

oom/kubernetes/robot$./demo-k8s.sh distribute

find your path to the logs, for example:

root@ip-172-31-57-55:/dockerdata-nfs/onap/robot# kubectl --namespace onap-robot exec -it robot-4251390084-lmdbb bash

root@robot-4251390084-lmdbb:/# ls /var/opt/OpenECOMP_ETE/html/logs/demo/InitD                                                            

InitDemo/         InitDistribution/ 

the path is

http://<your_dns_name>:30209/logs/demo/InitDemo/log.html#s1-s1-s1-s1-t1



SSH into ONAP containers

Normally I would follow https://kubernetes.io/docs/tasks/debug-application-cluster/get-shell-running-container/

Get the pod name via

kubectl get pods --all-namespaces -o wide

bash into the pod via

kubectl -n onap-mso exec -it  mso-1648770403-8hwcf /bin/bash
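
Since the pod name suffix changes on every redeploy, looking it up inline saves a step (a sketch; assumes a single matching pod in the namespace):

POD=$(kubectl -n onap-mso get pods --no-headers | grep mso- | awk '{print $1}' | head -1)
kubectl -n onap-mso exec -it $POD /bin/bash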


Push Files to Pods

Trying to get an authorization file into the robot pod

root@obriensystemskub0:~/oom/kubernetes/oneclick# kubectl cp authorization onap-robot/robot-44708506-nhm0n:/home/ubuntu

The copy above succeeds; copying over a file that already exists fails:
root@obriensystemskub0:~/oom/kubernetes/oneclick# kubectl cp authorization onap-robot/robot-44708506-nhm0n:/etc/lighttpd/authorization
tar: authorization: Cannot open: File exists
tar: Exiting with failure status due to previous errors
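
One workaround when the destination file already exists is to copy to a scratch path and move it into place inside the pod (a sketch using the pod from the example above):

kubectl cp authorization onap-robot/robot-44708506-nhm0n:/tmp/authorization
kubectl -n onap-robot exec robot-44708506-nhm0n -- mv /tmp/authorization /etc/lighttpd/authorization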

Redeploying Code war/jar in a docker container

See building the docker image - use your own local repo or a repo on dockerhub, modify the values.yaml, and delete/recreate your pod to switch images.

Docker DevOps#DockerBuild

Example in LOG-136 - Logging RI: Code/build/tag microservice docker image (IN PROGRESS)

Turn on Debugging

via URL

http://cd.onap.info:30223/mso/logging/debug

via logback.xml

Attaching a debugger to a docker container
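
One generic approach is to forward the JVM debug port from the pod to your workstation and attach the IDE to localhost (a sketch; assumes the container's JVM was started with JPDA enabled on port 8000, e.g. -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8000):

kubectl -n onap-mso port-forward mso-1648770403-8hwcf 8000:8000
# then attach your IDE's remote-debug configuration to localhost:8000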



Running ONAP Portal UI Operations

Running ONAP using the vnc-portal

see (Optional) Tutorial: Onboarding and Distributing a Vendor Software Product (VSP)

or run the vnc-portal container to access ONAP using the traditional port mappings. See the following recorded video by Mike Elliot of the OOM team for an audio-visual reference:

https://wiki.onap.org/download/attachments/13598723/zoom_0.mp4?version=1&modificationDate=1502986268000&api=v2

Check for the vnc-portal port via the following (it is always 30211):

obrienbiometrics:onap michaelobrien$ ssh ubuntu@dev.onap.info
ubuntu@ip-172-31-93-122:~$ sudo su -
root@ip-172-31-93-122:~# kubectl get services --all-namespaces -o wide
NAMESPACE             NAME                          CLUSTER-IP      EXTERNAL-IP   PORT(S)                                                                      AGE       SELECTOR
onap-portal           vnc-portal                    10.43.78.204    <nodes>       6080:30211/TCP,5900:30212/TCP                                                4d        app=vnc-portal

launch the vnc-portal in a browser

http://dev.onap.info:30211/

password is "password"

Open Firefox inside the VNC VM and launch the portal normally:

http://portal.api.simpledemo.onap.org:8989/ONAPPORTAL/login.htm

(20170906) Before running SDC, fix the /etc/hosts (thanks Yogini for catching this) - edit your /etc/hosts and change sdc.ui to sdc.api.

See OOM-282 - vnc-portal requires /etc/hosts url fix for SDC: sdc.ui should be sdc.api (CLOSED)
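
To verify whether a given vnc-portal pod still has the bad entry (a sketch; substitute your actual pod name from kubectl get pods):

kubectl -n onap-portal exec <vnc-portal-pod> -- grep sdc /etc/hosts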

(screenshots: /etc/hosts before and after the edit, plus notes)



Log in and run SDC.


Continue with the normal ONAP demo flow at (Optional) Tutorial: Onboarding and Distributing a Vendor Software Product (VSP)

Running Multiple ONAP namespaces

Run multiple environments on the same machine - TODO

Troubleshooting

Rancher fails to restart on server reboot

Issues can occur after a reboot of a colocated server/agent.

Installing Clean Ubuntu

apt-get install ssh

apt-get install ubuntu-desktop

DNS resolution

The following warning can be ignored - it is not relevant:

Search Line limits were exceeded, some dns names have been omitted, the applied search line is: default.svc.cluster.local svc.cluster.local cluster.local kubelet.kubernetes.rancher.internal kubernetes.rancher.internal rancher.internal

https://github.com/rancher/rancher/issues/9303

Config Pod fails to start with Error

Make sure your OpenStack parameters are set in onap-parameters.yaml if you get the following error starting up the config pod:

root@obriensystemsu0:~# kubectl get pods --all-namespaces -a
NAMESPACE     NAME                                   READY     STATUS    RESTARTS   AGE
kube-system   heapster-4285517626-l9wjp              1/1       Running   4          22d
kube-system   kube-dns-2514474280-4411x              3/3       Running   9          22d
kube-system   kubernetes-dashboard-716739405-fq507   1/1       Running   4          22d
kube-system   monitoring-grafana-3552275057-w3xml    1/1       Running   4          22d
kube-system   monitoring-influxdb-4110454889-bwqgm   1/1       Running   4          22d
kube-system   tiller-deploy-737598192-841l1          1/1       Running   4          22d
onap          config                                 0/1       Error     0          1d
root@obriensystemsu0:~# vi /etc/hosts
root@obriensystemsu0:~# kubectl logs -n onap config
Validating onap-parameters.yaml has been populated
Error: OPENSTACK_UBUNTU_14_IMAGE must be set in onap-parameters.yaml
+ echo 'Validating onap-parameters.yaml has been populated'
+ [[ -z '' ]]
+ echo 'Error: OPENSTACK_UBUNTU_14_IMAGE must be set in onap-parameters.yaml'
+ exit 1
 
fix
root@obriensystemsu0:~/onap_1007/oom/kubernetes/config# helm delete --purge onap-config
release "onap-config" deleted
root@obriensystemsu0:~/onap_1007/oom/kubernetes/config# ./createConfig.sh -n onap
 
**** Creating configuration for ONAP instance: onap
Error from server (AlreadyExists): namespaces "onap" already exists
NAME:   onap-config
LAST DEPLOYED: Mon Oct  9 21:35:27 2017
NAMESPACE: onap
STATUS: DEPLOYED
 
RESOURCES:
==> v1/ConfigMap
NAME                   DATA  AGE
global-onap-configmap  15    0s
 
==> v1/Pod
NAME    READY  STATUS             RESTARTS  AGE
config  0/1    ContainerCreating  0         0s
 
**** Done ****
root@obriensystemsu0:~/onap_1007/oom/kubernetes/config# kubectl get pods --all-namespaces -a
NAMESPACE     NAME                                   READY     STATUS    RESTARTS   AGE
kube-system   heapster-4285517626-l9wjp              1/1       Running   4          22d
kube-system   kube-dns-2514474280-4411x              3/3       Running   9          22d
kube-system   kubernetes-dashboard-716739405-fq507   1/1       Running   4          22d
kube-system   monitoring-grafana-3552275057-w3xml    1/1       Running   4          22d
kube-system   monitoring-influxdb-4110454889-bwqgm   1/1       Running   4          22d
kube-system   tiller-deploy-737598192-841l1          1/1       Running   4          22d
onap          config                                 1/1       Running   0          25s
root@obriensystemsu0:~/onap_1007/oom/kubernetes/config# kubectl get pods --all-namespaces -a
NAMESPACE     NAME                                   READY     STATUS      RESTARTS   AGE
kube-system   heapster-4285517626-l9wjp              1/1       Running     4          22d
kube-system   kube-dns-2514474280-4411x              3/3       Running     9          22d
kube-system   kubernetes-dashboard-716739405-fq507   1/1       Running     4          22d
kube-system   monitoring-grafana-3552275057-w3xml    1/1       Running     4          22d
kube-system   monitoring-influxdb-4110454889-bwqgm   1/1       Running     4          22d
kube-system   tiller-deploy-737598192-841l1          1/1       Running     4          22d
onap          config                                 0/1       Completed   0          1m
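
To avoid this on a fresh install, a quick pre-flight check that the parameter is populated (a sketch; the value shown is purely illustrative - use an Ubuntu 14.04 image name from your own cloud):

grep OPENSTACK_UBUNTU_14_IMAGE onap-parameters.yaml
# expect a non-empty value, for example:
# OPENSTACK_UBUNTU_14_IMAGE: "Ubuntu 14.04 LTS"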
 
  
