
SDNC Cluster Deployment

Configure SDNC Cluster Deployment

We use Kubernetes replicas to achieve the SDN-C cluster deployment (see 4. Deploy/Undeploy the SDN-C Cluster for details on the desired goal).

This only needs to be done once. At the moment, all modifications are made manually; they can be automated via scripting in the future if the need arises.


Edit SDNC Templates

The following is the list of SDNC deployment templates which need to be modified for an SDN-C cluster deployment:

# | Template file under {$OOM}/kubernetes/sdnc/templates | Changed/Added fields and values

1 | db-deployment.yaml

Field | New value | Old value
.apiVersion | apps/v1beta1 (see note 1) | extensions/v1beta1
.kind | StatefulSet | Deployment
.spec.serviceName | "dbhost" (see note 2) | N/A
.spec.replicas | 2 | N/A
.spec.initContainers | new value below | N/A

New value (Init Containers):
initContainers:
      - name: init-mysql
        image: {{ .Values.image.mysql }}
        imagePullPolicy: {{ .Values.pullPolicy }}
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: gcr.io/google-samples/xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only sdnc-dbhost-$(($ordinal-1)).dbhost.{{ .Values.nsPrefix }}-sdnc 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: sdnc-data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d


.spec.containers | new value below | old value below

New value (Containers):
containers:
      - env:
        - name: MYSQL_ROOT_PASSWORD
          value: openECOMP1.0
        - name: MYSQL_ROOT_HOST
          value: '%'
        image: {{ .Values.image.mysqlServer }}
        imagePullPolicy: {{ .Values.pullPolicy }}
        name: sdnc-db-container
        volumeMounts:
        - mountPath: /var/lib/mysql
          name: sdnc-data
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        ports:
        - containerPort: 3306
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          tcpSocket:
            port: 3306
          initialDelaySeconds: 5
          periodSeconds: 10
      - name: xtrabackup
        image: gcr.io/google-samples/xtrabackup:1.0
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: openECOMP1.0
        - name: MYSQL_ROOT_HOST
          value: '%'
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql --user=root --password=$MYSQL_ROOT_PASSWORD -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql --user=root --password=$MYSQL_ROOT_PASSWORD -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='sdnc-dbhost-0.dbhost.{{ .Values.nsPrefix }}-sdnc',
            MASTER_USER="root",
            MASTER_PASSWORD=$MYSQL_ROOT_PASSWORD,
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root --password=$MYSQL_ROOT_PASSWORD"
        volumeMounts:
        - name: sdnc-data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
Old value (Containers):
containers:
      - env:
        - name: MYSQL_ROOT_PASSWORD
          value: openECOMP1.0
        - name: MYSQL_ROOT_HOST
          value: '%'
        image: {{ .Values.image.mysqlServer }}
        imagePullPolicy: {{ .Values.pullPolicy }}
        name: sdnc-db-container
        volumeMounts:
        - mountPath: /etc/localtime
          name: localtime
          readOnly: true
        - mountPath: /var/lib/mysql
          name: sdnc-data
        ports:
        - containerPort: 3306
        readinessProbe:
          tcpSocket:
            port: 3306
          initialDelaySeconds: 5
          periodSeconds: 10
.spec.volumes | new value below | old value below

New value (Dynamic Volume Claim):
volumeClaimTemplates:
  - metadata:
     name: sdnc-data
     annotations:
       volume.beta.kubernetes.io/storage-class: "{{ .Values.nsPrefix }}-sdnc-data"
    spec:
      accessModes: ["ReadWriteMany"]
      resources:
        requests:
          storage: 1Gi
Old value (Static Volume Claim):
volumes:
      - name: localtime
        hostPath:
          path: /etc/localtime
      - name: sdnc-data
        persistentVolumeClaim:
          claimName: sdnc-db
2 | sdnc-deployment.yaml

Field | New value | Old value
.apiVersion | apps/v1beta1 (see note 1) | extensions/v1beta1
.kind | StatefulSet | Deployment
.spec.serviceName | "sdnhostcluster" (see note 2) | N/A
.spec.replicas | 3 | N/A
.spec.podManagementPolicy | "Parallel" (see note 3) | N/A
.spec.containers.ports (see note 4) | new value below | N/A

- containerPort: 2550
- containerPort: 8080

.spec.containers.volumeMounts (see note 5.a) | new value below | N/A

- mountPath: /opt/onap/sdnc/bin/startODL.sh
  name: sdnc-startodl
- mountPath: /opt/opendaylight/current/deploy
  name: sdnc-deploy
.spec.volumes (see note 5.b) | new value below | N/A

- name: sdnc-deploy
  hostPath:
    path: /home/ubuntu/cluster/deploy
- name: sdnc-startodl
  hostPath:
    path: /home/ubuntu/cluster/script/cluster-startODL.sh
3 | web-deployment.yaml

Field | New value | Old value
.apiVersion | apps/v1beta1 (see note 1) | extensions/v1beta1
.kind | StatefulSet | Deployment
.spec.serviceName | "sdnc-portal" (see note 2) | N/A
.spec.replicas | 3 | N/A
.spec.podManagementPolicy | "Parallel" (see note 3) | N/A
4 | sdnc-pv-pvc.yaml

Changed from static volume mounts (PV and PVC) to dynamic volume mounts.

New value (Dynamic Volume Provisioner):
kind: Service
apiVersion: v1
metadata:
  name: nfs-provisioner
  namespace: "{{ .Values.nsPrefix }}-sdnc"
  labels:
    app: nfs-provisioner
spec:
  ports:
    - name: nfs
      port: 2049
    - name: mountd
      port: 20048
    - name: rpcbind
      port: 111
    - name: rpcbind-udp
      port: 111
      protocol: UDP
  selector:
    app: nfs-provisioner
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  name: nfs-provisioner
  namespace: "{{ .Values.nsPrefix }}-sdnc"
spec:
  replicas: 1
  strategy:
    type: Recreate 
  template:
    metadata:
      labels:
        app: nfs-provisioner
    spec:
      containers:
        - name: nfs-provisioner
          image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.8
          ports:
            - name: nfs
              containerPort: 2049
            - name: mountd
              containerPort: 20048
            - name: rpcbind
              containerPort: 111
            - name: rpcbind-udp
              containerPort: 111
              protocol: UDP
          securityContext:
            capabilities:
              add:
                - DAC_READ_SEARCH
                - SYS_RESOURCE
          args:
            - "-provisioner=sdnc/nfs"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: SERVICE_NAME
              value: nfs-provisioner
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          imagePullPolicy: "IfNotPresent"
          volumeMounts:
            - name: export-volume
              mountPath: /export
      volumes:
        - name: export-volume
          hostPath:
            path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/data
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: "{{ .Values.nsPrefix }}-sdnc-data"
  namespace: "{{ .Values.nsPrefix }}-sdnc"
provisioner: sdnc/nfs
Old value (Static Volume):
kind: PersistentVolume
metadata:
  name: "{{ .Values.nsPrefix }}-sdnc-db"
  namespace: "{{ .Values.nsPrefix }}-sdnc"
  labels:
    name: "{{ .Values.nsPrefix }}-sdnc-db"
spec:
  capacity:
    storage: 2Gi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /dockerdata-nfs/{{ .Values.nsPrefix }}/sdnc/data
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: sdnc-db
  namespace: "{{ .Values.nsPrefix }}-sdnc"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 2Gi
  selector:
    matchLabels:
      name: "{{ .Values.nsPrefix }}-sdnc-db"
5 | all-services.yaml

  • Add a headless service for sdnc (see note 6):
---
apiVersion: v1
kind: Service
metadata:
 name: sdnhostcluster
 namespace: onap-sdnc
 labels:
   app: sdnc
 annotations:
   service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
 ports:
 - name: "sdnc-cluster-port"
   port: 2550
 clusterIP: None
 selector:
   app: sdnc
 sessionAffinity: None
 type: ClusterIP

  • Find the sdnhost service and expose one more port under .spec.ports:

- name: "sdnc-jolokia-port-8080"
  port: 9090
  targetPort: 8080
  nodePort: {{ .Values.nodePortPrefix }}00


# | File under {$OOM}/kubernetes/sdnc | Changed/Added fields and values

1 | values.yaml | Defined a new variable "mysql: mysql:5.6"



Notes:

  1. Use .apiVersion "apps/v1beta1" for Kubernetes versions before 1.8.0; otherwise, use .apiVersion "apps/v1beta2".
    • Check the Kubernetes version using the command "kubectl version" (a version-check sketch follows these notes).

       Example of kubernetes version 1.7.7

      Client Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.2", GitCommit:"bdaeafa71f6c7c04636251031f93464384d54963", GitTreeState:"clean", BuildDate:"2017-10-24T19:48:57Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
      Server Version: version.Info{Major:"1", Minor:"7+", GitVersion:"v1.7.7-rancher1", GitCommit:"a1ea37c6f6d21f315a07631b17b9537881e1986a", GitTreeState:"clean", BuildDate:"2017-10-02T21:33:08Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}

       Example of kubernetes version 1.8.3

      Client Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.3", GitCommit:"f0efb3cb883751c5ffdbe6d515f3cb4fbe7b7acd", GitTreeState:"clean", BuildDate:"2017-11-08T18:39:33Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}

      Server Version: version.Info{Major:"1", Minor:"8+", GitVersion:"v1.8.3-rancher1", GitCommit:"beb8311a9f114ba92558d8d771a81b7fb38422ae", GitTreeState:"clean", BuildDate:"2017-11-14T00:54:19Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}

  2. The value must align with the associated service name in the all_services.yaml file under the same directory.
  3. By default, .spec.podManagementPolicy has the value "OrderedReady".
    • With the value "OrderedReady", Kubernetes pod management tells the StatefulSet controller to respect the ordering guarantees: it waits for a Pod to become Running and Ready, or to terminate completely, before launching or terminating another Pod.
    • With the value "Parallel", Kubernetes pod management tells the StatefulSet controller to launch or terminate all Pods in parallel, without waiting for Pods to become Running and Ready or to terminate completely before launching or terminating another Pod.
  4. Expose two new container ports: 2550 for SDN-C cluster communication (the "sdnc-cluster-port" of the sdnhostcluster headless service) and 8080 for Jolokia (reached via the "sdnc-jolokia-port-8080" service port).
  5. Since startODL.sh has to be changed for the cluster to function, two paths must be mounted:
    1. mount /home/ubuntu/cluster/script/cluster-startODL.sh (local) to replace /opt/onap/sdnc/bin/startODL.sh (docker), so that we can use our local updated script with cluster config.
    2. mount /home/ubuntu/cluster/deploy (local) to /opt/opendaylight/current/deploy (docker), so that we can dynamically deploy test bundles outside pods.
  6. The newly added headless service allows the SDNC pods in the SDN-C cluster to find each other directly via fixed FQDNs.
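
A small convenience sketch for note 1, assuming kubectl is already configured against the target cluster: it prints only the server version line so you can choose between apps/v1beta1 (server below 1.8.0) and apps/v1beta2 (1.8.0 or later).

# Print the Kubernetes server version only (see note 1 above)
kubectl version | grep 'Server Version'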

Create New Script: cluster-startODL.sh

This is a manual step for now; it can be automated when the SDN-C cluster deployment is automated.

Create cluster-startODL.sh under /home/ubuntu/cluster/script/

cluster-startODL.sh
#!/bin/bash
###
# ============LICENSE_START=======================================================
# openECOMP : SDN-C
# ================================================================================
# Copyright (C) 2017 AT&T Intellectual Property. All rights
#                                                       reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
###
function enable_odl_cluster(){
  echo "Installing Opendaylight cluster features"
  ${ODL_HOME}/bin/client -u karaf feature:install odl-mdsal-clustering
  ${ODL_HOME}/bin/client -u karaf feature:install odl-jolokia
  echo "Update cluster information statically"
  hm=$(hostname)
  echo "Get current Hostname ${hm}"
  #TODO Do naming check
  node=($(echo ${hm} | tr '-' '\n'))
  node_name=${node[0]}
  node_index=${node[1]}
  #TODO for dynamic clustering, have to use rest call to Master server
  #for getting the real replication number
  #sdnhostcluster should be the same as headless service
  node_list="${node_name}-0.sdnhostcluster.onap-sdnc.svc.cluster.local";
  for ((i=1;i<=2;i++));
  do
    node_list="${node_list} ${node_name}-$i.sdnhostcluster.onap-sdnc.svc.cluster.local"
  done
  /opt/opendaylight/current/bin/configure_cluster.sh $((node_index+1)) ${node_list}
}
# Install SDN-C platform components if not already installed and start container
ODL_HOME=${ODL_HOME:-/opt/opendaylight/current}
ODL_ADMIN_PASSWORD=${ODL_ADMIN_PASSWORD:-Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U}
SDNC_HOME=${SDNC_HOME:-/opt/onap/sdnc}
SLEEP_TIME=${SLEEP_TIME:-120}
MYSQL_PASSWD=${MYSQL_PASSWD:-openECOMP1.0}
#
# Wait for database
#
echo "Waiting for mysql"
until mysql -h dbhost -u root -p${MYSQL_PASSWD} mysql &> /dev/null
do
  printf "."
  sleep 1
done
echo -e "\nmysql ready"
if [ ! -f ${SDNC_HOME}/.installed ]
then
        echo "Installing SDN-C database"
        ${SDNC_HOME}/bin/installSdncDb.sh
        echo "Starting OpenDaylight"
        ${ODL_HOME}/bin/start
        echo "Waiting ${SLEEP_TIME} seconds for OpenDaylight to initialize"
        sleep ${SLEEP_TIME}
        echo "Installing SDN-C platform features"
        ${SDNC_HOME}/bin/installFeatures.sh
        if [ -x ${SDNC_HOME}/svclogic/bin/install.sh ]
        then
                echo "Installing directed graphs"
                ${SDNC_HOME}/svclogic/bin/install.sh
        fi
        enable_odl_cluster
        echo "Restarting OpenDaylight"
        ${ODL_HOME}/bin/stop
        echo "Installed at `date`" > ${SDNC_HOME}/.installed
fi
exec ${ODL_HOME}/bin/karaf
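
A minimal host-preparation sketch, assuming the hostPath locations from sdnc-deployment.yaml above are used verbatim on every Kubernetes node that may run an sdnc pod; adjust the paths if yours differ.

# Create the directories referenced by the sdnc-deployment.yaml hostPath volumes
mkdir -p /home/ubuntu/cluster/script /home/ubuntu/cluster/deploy
# Paste the cluster-startODL.sh content above into the script file, then make it executable
chmod +x /home/ubuntu/cluster/script/cluster-startODL.sh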


Create the ONAP Config


# | Purpose | Command and Examples
0.1

(Only Once) Create the ONAP config using a sample YAML file

cd {$OOM}/kubernetes/config

cp onap-parameters-sample.yaml onap-parameters.yaml

0

Set the OOM Kubernetes config environment

cd {$OOM}/kubernetes/oneclick

source setenv.bash

1

Run the createConfig script to create the ONAP config

cd {$OOM}/kubernetes/config
./createConfig.sh -n onap

 Example of createConfig output

**** Creating configuration for ONAP instance: onap

namespace "onap" created

NAME:   onap-config

LAST DEPLOYED: Wed Nov  8 20:47:35 2017

NAMESPACE: onap

STATUS: DEPLOYED

 

RESOURCES:

==> v1/ConfigMap

NAME                   DATA  AGE

global-onap-configmap  15    0s

 

==> v1/Pod

NAME    READY  STATUS             RESTARTS  AGE

config  0/1    ContainerCreating  0         0s

 

 

**** Done ****

Wait for the config-init container to finish

Use the following command to monitor the onap config init until it reaches the Completed STATUS (a wait-loop sketch is shown after the example below):

kubectl get pod --all-namespaces -a

 Example of final output

The final output should look like the following, with the onap config pod in Completed STATUS:
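
If you prefer not to watch the output by hand, a minimal wait-loop sketch (assuming the config pod is named "config" in the "onap" namespace, as in the examples on this page):

# Poll every 5 seconds until the onap config pod reports Completed
until kubectl get pods -n onap -a 2>/dev/null | grep -q 'config.*Completed'; do
  echo "waiting for the onap config pod to complete..."
  sleep 5
done
echo "onap config is Completed"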

Additional checks for config-init
helm

helm ls --all

 Example of output

NAME REVISION UPDATED STATUS CHART NAMESPACE
onap-config 1 Tue Nov 21 17:07:13 2017 DEPLOYED config-1.1.0 onap

helm status onap-config

 Example of output

LAST DEPLOYED: Tue Nov 21 17:07:13 2017
NAMESPACE: onap
STATUS: DEPLOYED

RESOURCES:
==> v1/ConfigMap
NAME DATA AGE
global-onap-configmap 15 2d

==> v1/Pod
NAME READY STATUS RESTARTS AGE
config 0/1 Completed 0 2d

 kubernetes namespaces

kubectl get namespaces

 Example of output

NAME STATUS AGE
default Active 15d
kube-public Active 15d
kube-system Active 15d
onap Active 2d


Deploy the SDN-C Application

# | Purpose | Command and Examples
0

Set the OOM Kubernetes config environment

(If you have set the OOM Kubernetes config environment in the same terminal, you can skip this step)

cd {$OOM}/kubernetes/oneclick

source setenv.bash

1

Run the createAll script to deploy the SDN-C application

cd {$OOM}/kubernetes/oneclick

./createAll.bash -n onap -a sdnc

 Example of createAll output

********** Creating instance 1 of ONAP with port range 30200 and 30399

********** Creating ONAP:


********** Creating deployments for sdnc **********

Creating namespace **********
namespace "onap-sdnc" created

Creating service account **********
clusterrolebinding "onap-sdnc-admin-binding" created

Creating registry secret **********
secret "onap-docker-registry-key" created

Creating deployments and services **********
NAME: onap-sdnc
LAST DEPLOYED: Thu Nov 23 20:13:32 2017
NAMESPACE: onap
STATUS: DEPLOYED

RESOURCES:
==> v1/PersistentVolume
NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE
onap-sdnc-db 2Gi RWX Retain Bound onap-sdnc/sdnc-db 1s

==> v1/PersistentVolumeClaim
NAME STATUS VOLUME CAPACITY ACCESSMODES AGE
sdnc-db Bound onap-sdnc-db 2Gi RWX 1s

==> v1/Service
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
dbhost None <none> 3306/TCP 1s
sdnctldb01 None <none> 3306/TCP 1s
sdnctldb02 None <none> 3306/TCP 1s
sdnc-dgbuilder 10.43.97.219 <nodes> 3000:30203/TCP 1s
sdnhost 10.43.99.163 <nodes> 8282:30202/TCP,8201:30208/TCP 1s
sdnc-portal 10.43.72.72 <nodes> 8843:30201/TCP 1s

==> extensions/v1beta1/Deployment
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
sdnc-dgbuilder 1 1 1 0 1s

==> apps/v1beta1/StatefulSet
NAME DESIRED CURRENT AGE
sdnc-dbhost 2 1 1s
sdnc 3 3 1s
sdnc-portal 2 2 1s

 


**** Done ****


Ensure that the SDN-C application has started

Use the kubectl get pods command to monitor the SDN-C startup (a watch sketch follows the example below); you should observe:

  • the sdnc-dbhost-0 pod starts and reaches Running STATUS first,
    • while
      • the sdnc-dbhost-1 pod does not exist yet and
      • the sdnc, sdnc-dgbuilder and sdnc-portal pods stay in Init:0/1 STATUS
  • once the sdnc-dbhost-0 pod is fully started with READY "1/1",
    • sdnc-dbhost-1 starts in ContainerCreating STATUS and runs up to Running STATUS
  • once the sdnc-dbhost-1 pod is in Running STATUS,
    • the sdnc pods start from PodInitializing STATUS and end up in Running STATUS in parallel,
    • while
      • the sdnc-dgbuilder and sdnc-portal pods stay in Init:0/1 STATUS
  • once the sdnc pods are in Running STATUS,
    • sdnc-dgbuilder and sdnc-portal start from PodInitializing STATUS and end up in Running STATUS in parallel
 Example of start up status changes
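
One way to watch these status transitions as they happen (a simple sketch; assumes the onap-sdnc namespace used throughout this page):

# Watch pod status changes in the onap-sdnc namespace (Ctrl-C to stop)
kubectl get pods -n onap-sdnc -w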

2

Validate that all SDN-C pods and services are created properly

helm ls --all

 Example of SDNC release

ubuntu@sdnc-k8s:~$ helm ls --all
NAME REVISION UPDATED STATUS CHART NAMESPACE
onap-config 1 Tue Nov 21 17:07:13 2017 DEPLOYED config-1.1.0 onap
onap-sdnc 1 Thu Nov 23 20:13:32 2017 DEPLOYED sdnc-0.1.0 onap
ubuntu@sdnc-k8s:~$

kubectl get namespaces

 Example of SDNC namespace

ubuntu@sdnc-k8s:~$ kubectl get namespaces
NAME STATUS AGE
default Active 15d
kube-public Active 15d
kube-system Active 15d
onap Active 2d
onap-sdnc Active 12m
ubuntu@sdnc-k8s:~$

kubectl get deployment --all-namespaces

 Example of SDNC deployment

ubuntu@sdnc-k8s-2:~$ kubectl get deployment --all-namespaces
NAMESPACE NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
kube-system heapster 1 1 1 1 15d
kube-system kube-dns 1 1 1 1 15d
kube-system kubernetes-dashboard 1 1 1 1 15d
kube-system monitoring-grafana 1 1 1 1 15d
kube-system monitoring-influxdb 1 1 1 1 15d
kube-system tiller-deploy 1 1 1 1 15d
onap-sdnc sdnc-dgbuilder 1 1 1 0 26m
ubuntu@sdnc-k8s-2:~$

kubectl get clusterrolebinding --all-namespaces

 Example of SDNC cluster role binding

ubuntu@sdnc-k8s:~$ kubectl get clusterrolebinding --all-namespaces
NAMESPACE NAME AGE
addons-binding 15d
onap-sdnc-admin-binding 13m
ubuntu@sdnc-k8s:~$

kubectl get serviceaccounts --all-namespaces

 Example of SDNC service account

ubuntu@sdnc-k8s:~$ kubectl get serviceaccounts --all-namespaces
NAMESPACE NAME SECRETS AGE
default default 1 15d
kube-public default 1 15d
kube-system default 1 15d
kube-system io-rancher-system 1 15d
onap default 1 2d
onap-sdnc default 1 14m
ubuntu@sdnc-k8s:~$

kubectl get service --all-namespaces

 Example of all SDNC services

ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$ kubectl get service --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.43.0.1 <none> 443/TCP 15d
kube-system heapster ClusterIP 10.43.210.11 <none> 80/TCP 15d
kube-system kube-dns ClusterIP 10.43.0.10 <none> 53/UDP,53/TCP 15d
kube-system kubernetes-dashboard ClusterIP 10.43.196.205 <none> 9090/TCP 15d
kube-system monitoring-grafana ClusterIP 10.43.90.8 <none> 80/TCP 15d
kube-system monitoring-influxdb ClusterIP 10.43.52.1 <none> 8086/TCP 15d
kube-system tiller-deploy ClusterIP 10.43.106.73 <none> 44134/TCP 15d
onap-sdnc dbhost ClusterIP None <none> 3306/TCP 17m
onap-sdnc sdnc-dgbuilder NodePort 10.43.97.219 <none> 3000:30203/TCP 17m
onap-sdnc sdnc-portal NodePort 10.43.72.72 <none> 8843:30201/TCP 17m
onap-sdnc sdnctldb01 ClusterIP None <none> 3306/TCP 17m
onap-sdnc sdnctldb02 ClusterIP None <none> 3306/TCP 17m
onap-sdnc sdnhost NodePort 10.43.99.163 <none> 8282:30202/TCP,8201:30208/TCP 17m
ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$

kubectl get pods --all-namespaces -a

 Example of all SDNC pods

ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$ kubectl get pods --all-namespaces -a
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system heapster-4285517626-32km8 1/1 Running 0 15d
kube-system kube-dns-638003847-vqz8t 3/3 Running 0 15d
kube-system kubernetes-dashboard-716739405-tnxj6 1/1 Running 0 15d
kube-system monitoring-grafana-2360823841-qfhzm 1/1 Running 0 15d
kube-system monitoring-influxdb-2323019309-41q0l 1/1 Running 0 15d
kube-system tiller-deploy-737598192-5663c 1/1 Running 0 15d
onap config 0/1 Completed 0 2d
onap-sdnc sdnc-0 2/2 Running 0 17m
onap-sdnc sdnc-1 0/2 CrashLoopBackOff 16 17m
onap-sdnc sdnc-2 2/2 Running 0 17m
onap-sdnc sdnc-dbhost-0 1/1 Running 0 17m
onap-sdnc sdnc-dbhost-1 1/1 Running 0 16m
onap-sdnc sdnc-dgbuilder-356329770-cpfzj 0/1 Running 6 17m
onap-sdnc sdnc-portal-0 0/1 Running 6 17m
onap-sdnc sdnc-portal-1 0/1 CrashLoopBackOff 7 17m
ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$

docker ps |grep sdnc

 Example of SDNC docker container

$ docker ps |grep sdnc |wc -l
14
$ docker ps |grep sdnc

9a1fc91b6dcc docker.elastic.co/beats/filebeat@sha256:fe7602b641ed8ee288f067f7b31ebde14644c4722d9f7960f176d621097a5942 "filebeat -e" 19 minutes ago Up 19 minutes k8s_filebeat-onap_sdnc-2_onap-sdnc_c7feb9d1-d08a-11e7-957f-0269cb13eff1_0
9dbaa04e160c docker.elastic.co/beats/filebeat@sha256:fe7602b641ed8ee288f067f7b31ebde14644c4722d9f7960f176d621097a5942 "filebeat -e" 19 minutes ago Up 19 minutes k8s_filebeat-onap_sdnc-0_onap-sdnc_c7f28480-d08a-11e7-957f-0269cb13eff1_0
fca36e9b5353 nexus3.onap.org:10001/onap/sdnc-image@sha256:1049151464b3e60d9a553bc2f3bdaf79555839217f0557652e982ca99398375a "/opt/onap/sdnc/bin/s" 19 minutes ago Up 19 minutes k8s_sdnc-controller-container_sdnc-2_onap-sdnc_c7feb9d1-d08a-11e7-957f-0269cb13eff1_0
00efa164a58a nexus3.onap.org:10001/onap/sdnc-image@sha256:1049151464b3e60d9a553bc2f3bdaf79555839217f0557652e982ca99398375a "/opt/onap/sdnc/bin/s" 19 minutes ago Up 19 minutes k8s_sdnc-controller-container_sdnc-0_onap-sdnc_c7f28480-d08a-11e7-957f-0269cb13eff1_0
4a2769dfee37 mysql/mysql-server@sha256:720f301388709af2c84ee09ba51340d09d1e9f7ba45f19719b5b18b5fa696771 "/entrypoint.sh mysql" 19 minutes ago Up 19 minutes (healthy) k8s_sdnc-db-container_sdnc-dbhost-0_onap-sdnc_c7e050b3-d08a-11e7-957f-0269cb13eff1_0

8b4a21cb2bd2 mysql/mysql-server@sha256:720f301388709af2c84ee09ba51340d09d1e9f7ba45f19719b5b18b5fa696771 "/entrypoint.sh mysql" 19 minutes ago Up 19 minutes (healthy) k8s_sdnc-db-container_sdnc-dbhost-1_onap-sdnc_cde0fde0-d08a-11e7-957f-0269cb13eff1_0

04904cb18336 gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-portal-0_onap-sdnc_c810f3e4-d08a-11e7-957f-0269cb13eff1_0
e89a3e28505a gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-2_onap-sdnc_c7feb9d1-d08a-11e7-957f-0269cb13eff1_0
7d2c4ac066f4 gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-dgbuilder-356329770-cpfzj_onap-sdnc_c7dd4b22-d08a-11e7-957f-0269cb13eff1_0
660ee9119001 gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-0_onap-sdnc_c7f28480-d08a-11e7-957f-0269cb13eff1_0
fa0e59e1a7c7 gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-dbhost-0_onap-sdnc_c7e050b3-d08a-11e7-957f-0269cb13eff1_0


24b6d3eb3020 gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-dbhost-1_onap-sdnc_cde0fde0-d08a-11e7-957f-0269cb13eff1_0
a6e6445b87eb gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-1_onap-sdnc_c7f6b06f-d08a-11e7-957f-0269cb13eff1_0
1d00b4bb46c0 gcr.io/google_containers/pause-amd64:3.0 "/pause" 19 minutes ago Up 19 minutes k8s_POD_sdnc-portal-1_onap-sdnc_c813c667-d08a-11e7-957f-0269cb13eff1_0

$

In the above example, the sdnc-1 containers are missing because that pod failed due to a directory-sharing issue.
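
If a pod stays in CrashLoopBackOff as sdnc-1 does above, a couple of troubleshooting commands may help (pod and container names taken from the examples on this page):

# Show events and state for the failing pod
kubectl describe pod sdnc-1 -n onap-sdnc
# Show logs from the SDN-C controller container inside that pod
kubectl logs sdnc-1 -n onap-sdnc -c sdnc-controller-container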

3

Validate that the SDN-C bundles are up

 Enter the pod container (2 options)
 Option 1: through pod name from anywhere

Use command

kubectl exec -it <POD_NAME> bash

 Example

 Option 2: through the docker container ID, from the node where the container runs

Use command

docker exec -it <DOCKER_CONTAINER_ID> bash

 Example

 Check SDNC bundles in ODL client
 Enter ODL client

 Check SDNC bundles
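
A command-line sketch of the same check (assumes the pod and container names shown earlier on this page; the karaf client path is the one used in cluster-startODL.sh, and feature:list is a standard Karaf shell command):

# Open a shell in the sdnc-0 controller container
kubectl exec -it sdnc-0 -n onap-sdnc -c sdnc-controller-container bash
# Inside the container, open the ODL Karaf client
/opt/opendaylight/current/bin/client -u karaf
# At the karaf prompt, confirm the clustering features are installed
feature:list -i | grep -E 'odl-mdsal-clustering|odl-jolokia'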

4

Validate that the SDN-C APIs are shown on the ODL RestConf page

Access the ODL RestConf page from the following URL:

http://<Kubernetes-Master-Node-IP>:30202/apidoc/explorer/index.html

 Example of SDNC APIs in ODL RestConf page
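
A quick reachability sketch from the command line (substitute your Kubernetes master node IP; the admin credentials are the same ones used elsewhere on this page):

# Expect an HTTP 200 when the apidoc explorer is up
curl -s -o /dev/null -w '%{http_code}\n' \
  -u admin:Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U \
  'http://<Kubernetes-Master-Node-IP>:30202/apidoc/explorer/index.html'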

5

Validate the SDN-C ODL cluster

Goal:

Verify that the SDN-C ODL cluster is running properly

Prerequisites
  1. Run this test on one of your Kubernetes nodes
  2. Make sure python-pycurl is installed
    • If not, on Ubuntu use "apt-get install python-pycurl" to install it
Use the ODL integration tool to monitor the ODL cluster

Clone ODL Integration-test project

git clone https://github.com/opendaylight/integration-test.git

Enter the cluster-monitor folder

cd integration-test/tools/clustering/cluster-monitor

Create the update-cluster.bash script

vi update-cluster.bash

Content of update-cluster.bash
 #!/bin/bash -x
#get ips string by using kubectl
ips_string=$(kubectl get pods --all-namespaces -o wide | grep 'sdnc-[0-9]' | awk '{print $7}')
ip_list=($(echo ${ips_string} | tr ' ' '\n'))
#loop and replace existing ip
for ((i=0;i<=2;i++));
do
   if [ "${ip_list[$i]}" == "<none>" ]; then
     echo "Ip of deleted pod is not ready yet"
     exit 1;
   fi
   let "j=$i+4"
   sed -i -r "${j}s/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b"/${ip_list[$i]}/ cluster.json
done
python monitor.py

This script fetches the IPs of all SDN-C pods and automatically updates the cluster.json file.

Start the cluster monitor UI

./update-cluster.bash

Note:

If the applications inside any of these three SDNC pods are not fully started, this script will not execute successfully, due to issues such as connection errors or value errors.

Otherwise, you should see the monitoring UI as follows:

Use testCluster RPC to test SDN-C load sharing

The testCluster-bundle.zip provides a testBundle which offers a testCluster API to help with validating SDN-C RPC load sharing in the deployed SDN-C cluster.

It's as easy as doing the following (a small load-sharing sketch follows this list):

  • download testCluster-bundle.zip from the attachment and place it in the sdnc-deploy hostPath that is defined in .spec.volumes of the sdnc-deployment.yaml file
    • As this hostPath is mounted as ODL's deploy directory, the testBundle will be automatically loaded by ODL and the testCluster API will be available to use.
  • the testCluster API can be accessed from the ODL RestConf page, Postman, or a curl command:

    •  curl command

      curl -u admin:Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U -H 'Accept: application/json' -X POST 'http://${kubernetes-master-api}:30202/restconf/operations/testCluster:who-am-i'

    •  An example of testCluster API response
      {
      	"output": {
      		"node": "sdnc-2"
      	}
      }
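
To actually observe the load sharing, a small sketch that invokes the RPC several times and prints which sdnc pod answered each call (KUBERNETES_MASTER_IP is a placeholder for your master node address; credentials as in the curl command above):

# Call the testCluster:who-am-i RPC 10 times in a row
for i in $(seq 1 10); do
  curl -s -u admin:Kp8bJ4SXszM0WXlhak3eHlcse2gAw84vaoGGmJvUy2U \
    -H 'Accept: application/json' -X POST \
    "http://KUBERNETES_MASTER_IP:30202/restconf/operations/testCluster:who-am-i"
  echo
done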


Undeploy the SDN-C Application

# | Purpose | Command and Examples
0

Set the OOM Kubernetes config environment

(If you have set the OOM Kubernetes config environment in the same terminal, you can skip this step)

cd {$OOM}/kubernetes/oneclick

source setenv.bash

1

Run the deleteAll script to delete all SDN-C pods and services

./deleteAll.bash -n onap -a sdnc

 Example of output

********** Cleaning up ONAP:
release "onap-sdnc" deleted
namespace "onap-sdnc" deleted
clusterrolebinding "onap-sdnc-admin-binding" deleted
Service account onap-sdnc-admin-binding deleted.

Waiting for namespaces termination...

********** Gone **********

2

Validate that all SDN-C pods and services are cleaned up

docker ps |grep sdnc

 Example of no more SDNC docker container

ubuntu@sdnc-k8s:~$ docker ps |grep sdnc
ubuntu@sdnc-k8s:~$

kubectl get pods --all-namespaces -a

 Example of no more SDNC pods

ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$ kubectl get pods --all-namespaces -a
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system heapster-4285517626-32km8 1/1 Running 0 15d
kube-system kube-dns-638003847-vqz8t 3/3 Running 0 15d
kube-system kubernetes-dashboard-716739405-tnxj6 1/1 Running 0 15d
kube-system monitoring-grafana-2360823841-qfhzm 1/1 Running 0 15d
kube-system monitoring-influxdb-2323019309-41q0l 1/1 Running 0 15d
kube-system tiller-deploy-737598192-5663c 1/1 Running 0 15d
onap config 0/1 Completed 0 2d
ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$

kubectl get service --all-namespaces

 Example of no more SDNC services

ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$ kubectl get service --all-namespaces
NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
default kubernetes ClusterIP 10.43.0.1 <none> 443/TCP 15d
kube-system heapster ClusterIP 10.43.210.11 <none> 80/TCP 15d
kube-system kube-dns ClusterIP 10.43.0.10 <none> 53/UDP,53/TCP 15d
kube-system kubernetes-dashboard ClusterIP 10.43.196.205 <none> 9090/TCP 15d
kube-system monitoring-grafana ClusterIP 10.43.90.8 <none> 80/TCP 15d
kube-system monitoring-influxdb ClusterIP 10.43.52.1 <none> 8086/TCP 15d
kube-system tiller-deploy ClusterIP 10.43.106.73 <none> 44134/TCP 15d
ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$

kubectl get serviceaccounts --all-namespaces

 Example of no more SDNC service account

ubuntu@sdnc-k8s:~$ kubectl get serviceaccounts --all-namespaces
NAMESPACE NAME SECRETS AGE
default default 1 15d
kube-public default 1 15d
kube-system default 1 15d
kube-system io-rancher-system 1 15d
onap default 1 2d
ubuntu@sdnc-k8s:~$

kubectl get clusterrolebinding --all-namespaces

 Example of no more SDNC cluster role binding

ubuntu@sdnc-k8s:~$ kubectl get clusterrolebinding --all-namespaces
NAMESPACE NAME AGE
addons-binding 15d
ubuntu@sdnc-k8s:~$

kubectl get deployment --all-namespaces

 Example of no more SDNC deployment

ubuntu@sdnc-k8s:~$ kubectl get deployment --all-namespaces

NAMESPACE     NAME                   DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE

kube-system   heapster               1         1         1            1           2d

kube-system   kube-dns               1         1         1            1           2d

kube-system   kubernetes-dashboard   1         1         1            1           2d

kube-system   monitoring-grafana     1         1         1            1           2d

kube-system   monitoring-influxdb    1         1         1            1           2d

kube-system   tiller-deploy          1         1         1            1           2d

ubuntu@sdnc-k8s:~$

kubectl get namespaces

 Example of no more SDNC namespace

ubuntu@sdnc-k8s:~$ kubectl get namespaces
NAME STATUS AGE
default Active 15d
kube-public Active 15d
kube-system Active 15d
onap Active 2d
ubuntu@sdnc-k8s:~$

helm ls --all

 Example of no more SDNC release

ubuntu@sdnc-k8s:~$ helm ls --all
NAME REVISION UPDATED STATUS CHART NAMESPACE
onap-config 1 Tue Nov 21 17:07:13 2017 DEPLOYED config-1.1.0 onap
ubuntu@sdnc-k8s:~$


Remove the ONAP Config

# | Purpose | Command and Examples
0

Set the OOM Kubernetes config environment

(If you have set the OOM Kubernetes config environment in the same terminal, you can skip this step)

cd {$OOM}/kubernetes/oneclick

source setenv.bash

1

Remove the ONAP config and any deployed applications in one shot

./deleteAll.bash -n onap

 Example of removing ONAP config output

ubuntu@sdnc-k8s:~/oom/kubernetes/oneclick$ ./deleteAll.bash -n onap

 

********** Cleaning up ONAP:

Error: release: not found

Error from server (NotFound): namespaces "onap-consul" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-consul-admin-binding" not found

Service account onap-consul-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-msb" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-msb-admin-binding" not found

Service account onap-msb-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-mso" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-mso-admin-binding" not found

Service account onap-mso-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-message-router" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-message-router-admin-binding" not found

Service account onap-message-router-admin-binding deleted.ls

 

 

release "onap-sdnc" deleted

namespace "onap-sdnc" deleted

clusterrolebinding "onap-sdnc-admin-binding" deleted

Service account onap-sdnc-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-vid" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-vid-admin-binding" not found

Service account onap-vid-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-robot" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-robot-admin-binding" not found

Service account onap-robot-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-portal" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-portal-admin-binding" not found

Service account onap-portal-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-policy" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-policy-admin-binding" not found

Service account onap-policy-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-appc" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-appc-admin-binding" not found

Service account onap-appc-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-aai" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-aai-admin-binding" not found

Service account onap-aai-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-sdc" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-sdc-admin-binding" not found

Service account onap-sdc-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-dcaegen2" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-dcaegen2-admin-binding" not found

Service account onap-dcaegen2-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-log" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-log-admin-binding" not found

Service account onap-log-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-cli" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-cli-admin-binding" not found

Service account onap-cli-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-multicloud" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-multicloud-admin-binding" not found

Service account onap-multicloud-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-clamp" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-clamp-admin-binding" not found

Service account onap-clamp-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-vnfsdk" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-vnfsdk-admin-binding" not found

Service account onap-vnfsdk-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-uui" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-uui-admin-binding" not found

Service account onap-uui-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-aaf" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-aaf-admin-binding" not found

Service account onap-aaf-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-vfc" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-vfc-admin-binding" not found

Service account onap-vfc-admin-binding deleted.

 

Error: release: not found

Error from server (NotFound): namespaces "onap-kube2msb" not found

Error from server (NotFound): clusterrolebindings.rbac.authorization.k8s.io "onap-kube2msb-admin-binding" not found

Service account onap-kube2msb-admin-binding deleted.

 

Waiting for namespaces termination...

 

********** Gone **********

2

Manually clean up

This step is to clean up the leftover items which were created by the config/createConfig script but not cleaned up by the oneclick/deleteAll script.

 Example of leftover ONAP config

ubuntu@sdnc-k8s:~/oom/kubernetes/config$ ./createConfig.sh -n onap

**** Creating configuration for ONAP instance: onap
Error from server (AlreadyExists): namespaces "onap" already exists
Error: a release named "onap-config" already exists.
Please run: helm ls --all "onap-config"; helm del --help
**** Done ****
ubuntu@sdnc-k8s:

ONAP serviceaccount

No action needed.

It cannot be deleted by a specific command, but will instead be automatically deleted when the namespace is deleted.

 Example of the service account that cannot be deleted

ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl get serviceaccounts --all-namespaces
NAMESPACE NAME SECRETS AGE
default default 1 15d
kube-public default 1 15d
kube-system default 1 15d
kube-system io-rancher-system 1 15d
onap default 1 2d
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl delete serviceaccounts default -n onap
serviceaccount "default" deleted
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl get serviceaccounts --all-namespaces
NAMESPACE NAME SECRETS AGE
default default 1 15d
kube-public default 1 15d
kube-system default 1 15d
kube-system io-rancher-system 1 15d
onap default 1 6s
ubuntu@sdnc-k8s:

... after ONAP namespace is deleted...

ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl get namespaces
NAME STATUS AGE
default Active 15d
kube-public Active 15d
kube-system Active 15d
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl get serviceaccounts --all-namespaces
NAMESPACE NAME SECRETS AGE
default default 1 15d
kube-public default 1 15d
kube-system default 1 15d
kube-system io-rancher-system 1 15d
ubuntu@sdnc-k8s:~/oom/kubernetes/config$

ONAP namespace
 Example of deleting the ONAP namespace

ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl get namespaces
NAME STATUS AGE
default Active 15d
kube-public Active 15d
kube-system Active 15d
onap Active 2d
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl delete namespace onap
namespace "onap" deleted
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl get namespaces
NAME STATUS AGE
default Active 15d
kube-public Active 15d
kube-system Active 15d
onap Terminating 2d
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ kubectl get namespaces
NAME STATUS AGE
default Active 15d
kube-public Active 15d
kube-system Active 15d
ubuntu@sdnc-k8s:~/oom/kubernetes/config$

 release
 Example of deleting ONAP config release

ubuntu@sdnc-k8s:~/oom/kubernetes/config$ helm ls --all
NAME REVISION UPDATED STATUS CHART NAMESPACE
onap-config 1 Tue Nov 21 17:07:13 2017 DEPLOYED config-1.1.0 onap
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ helm delete onap-config --purge
release "onap-config" deleted
ubuntu@sdnc-k8s:~/oom/kubernetes/config$ helm ls --all
ubuntu@sdnc-k8s:~/oom/kubernetes/config$

3

Delete the shared folder

 sudo rm -rf /dockerdata-nfs/onap


Scripts


The following scripts help to simplify various procedures by automating them.

autoCreateOnapConfig
########################################################################################
# This script replaces {$OOM}/kubernetes/config/createConfig.sh script                 #
# and will only terminate when the ONAP configuration is Completed                     #
#                                                                                      #
# Before using it, do the following to prepare the bash file:                          #
#   1, cd {$OOM}/kubernetes/oneclick                                                   #
#   2, vi autoCreateOnapConfig.bash                                                    #
#   3, paste the full content here to autoCreateOnapConfig.bash file and save the file #
#   4, chmod 777 autoCreateOnapConfig.bash                                             #
# To run it, just enter the following command:                                         #
#    ./autoCreateOnapConfig.bash                                                       #
########################################################################################
#!/bin/bash


echo "Create ONAP config under config directory..."
cd ../config
./createConfig.sh -n onap
cd -


echo "...done : kubectl get namespace
-----------------------------------------------
>>>>>>>>>>>>>> k8s namespace"
kubectl get namespace


echo "
-----------------------------------------------
>>>>>>>>>>>>>> helm : helm ls --all"
helm ls --all


echo "
-----------------------------------------------
>>>>>>>>>>>>>> pod : kubectl get pods --all-namespaces -a"
kubectl get pods --all-namespaces -a


status=`kubectl get pods --all-namespaces -a |grep onap |xargs echo | cut -d' ' -f4`
while true
do
  echo "wait for onap config pod reach to Completed STATUS"
  sleep 5
  echo "-----------------------------------------------"
  kubectl get pods --all-namespaces -a
  status=`kubectl get pods --all-namespaces -a |grep onap |xargs echo | cut -d' ' -f4`
  if [ "$status" = "Completed" ]
  then
    echo "onap config is Completed!!!"
    break
  fi
done
autoCleanOnapConfig
########################################################################################
# This script wraps {$OOM}/kubernetes/oneclick/deleteAll.sh script along with          #
# the following steps to clean up the ONAP config:                                     #
#     - remove ONAP namespace                                                          #
#     - remove ONAP release                                                            #
#     - remove ONAP shared directory                                                   #
#                                                                                      #
# Before using it, do the following to prepare the bash file:                          #
#   1, cd {$OOM}/kubernetes/oneclick                                                   #
#   2, vi autoCleanOnapConfig.bash                                                     #
#   3, paste the full content here to autoCleanOnapConfig.bash file and save the file  #
#   4, chmod 777 autoCleanOnapConfig.bash                                              #
# To run it, just enter the following command:                                         #
#    ./autoCleanOnapConfig.bash                                                        #
########################################################################################
#!/bin/bash

./deleteAll.bash -n onap

echo "----------------------------------------------
Force remove namespace..."
kubectl delete namespace onap
echo "...done"
kubectl get namespace

echo "Force delete helm process ..."
helm delete onap-config --purge --debug
echo "...done"
helm ls --all

echo "Remove ONAP dockerdata..."
sudo rm -rf /dockerdata-nfs/onap
echo "...done"
ls -altr /dockerdata-nfs
autoDeploySdnc
########################################################################################
# This script wraps {$OOM}/kubernetes/oneclick/createAll.sh script along with          #
# the following steps to deploy ONAP SDNC application:                                 #
#     - wait until sdnc-0 is running properly with both (2) containers up              #
#                                                                                      #
# Before using it, do the following to prepare the bash file:                          #
#   1, cd {$OOM}/kubernetes/oneclick                                                   #
#   2, vi autoDeploySdnc.bash                                                          #
#   3, paste the full content here to autoDeploySdnc.bash file and save the file       #
#   4, chmod 777 autoDeploySdnc.bash                                                   #
# To run it, just enter the following command:                                         #
#    ./autoDeploySdnc.bash                                                             #
########################################################################################
#!/bin/bash

echo "Deploy SDNC..."
./createAll.bash -n onap -a sdnc

echo "...done
-----------------------------------------------
>>>>>>>>>>>>>> pod : kubectl get pods --all-namespaces -a"
kubectl get pods --all-namespaces -a

status=`kubectl get pods --all-namespaces -a |grep sdnc-0 |xargs echo | cut -d' ' -f3`
while true
do
  echo "wait for onap sdnc-0 reachs fully running"
  sleep 5
  echo "-----------------------------------------------"
  kubectl get pods --all-namespaces -a

  status=`kubectl get pods --all-namespaces -a |grep sdnc-0 |xargs echo | cut -d' ' -f3`
  if [ "$status" = "2/2" ]
  then
    echo "onap sdnc-0 is running!!!"
    break
  fi
done
autoCleanSdnc
########################################################################################
# This script wraps {$OOM}/kubernetes/oneclick/deleteAll.sh script along with          #
# the following steps to un-deploy ONAP SDNC application fully:                        #
#     - force remove clusterrolebinding for onap-sdnc                                  #
#     - force remove namespace for onap-sdnc                                           #
#     - force remove release for onap-sdnc                                             #
#     - wait until onap-sdnc namespace is removed                                      #
#                                                                                      #
# Before using it, do the following to prepare the bash file:                          #
#   1, cd {$OOM}/kubernetes/oneclick                                                   #
#   2, vi autoCleanSdnc.bash                                                           #
#   3, paste the full content here to autoCleanSdnc.bash file and save the file        #
#   4, chmod 777 autoCleanSdnc.bash                                                    #
# To run it, just enter the following command:                                         #
#    ./autoCleanSdnc.bash                                                              #
########################################################################################
#!/bin/bash

./deleteAll.bash -n onap -a sdnc

echo "----------------------------------------------
Remove clusterrolebinding..."
kubectl delete clusterrolebinding onap-sdnc-admin-binding
echo "...done : kubectl get clusterrolebinding"
kubectl get clusterrolebinding

echo "Remove onap-sdnc namespace..."
kubectl delete namespaces onap-sdnc
echo "...done : kubectl get namespaces"
kubectl get namespaces

echo "Delete onap-sdnc release..."
helm delete onap-sdnc --purge
echo "...done: helm ls --all"
helm ls --all


sdncCount=`kubectl get namespaces | grep onap-sdnc | wc -l`
while true
do
  echo "wait for onap-sdnc namespace to be removed"
  sleep 5
  echo "-----------------------------------------------"
  kubectl get namespaces

  sdncCount=`kubectl get namespaces | grep onap-sdnc | wc -l`
  if [ "$sdncCount" = "0" ]
  then
    echo "sdnc removed!!!"
    break
  fi
done