...
Committing Code
Code Block:
# clone
git clone ssh://michaelobrien@gerrit.onap.org:29418/logging-analytics
# modify files
# stage your changes
git add .
git commit -m "your commit message"
# commit your staged changes with sign-off
git commit -s --amend
# add Issue-ID after Change-ID
# Submit your commit to ONAP Gerrit for review
git review
# goto https://gerrit.onap.org/r/#/dashboard/self
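For reference, the final amended commit message should look roughly like the following - the Change-Id is generated by the Gerrit commit hook and the Signed-off-by line by -s; the values shown are illustrative only.
Code Block:
update rancher version to 1.6.18

Change-Id: I0123456789abcdef0123456789abcdef01234567
Issue-ID: LOG-326
Signed-off-by: Michael OBrien <frank.obrien@amdocs.com>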
...
Workstation configuration
Ubuntu 16.04 on VMware Workstation 15 or Fusion 8 or AWS/Azure VM
Note: do not use the GUI upgrade (it will cause the VM to periodically lock) - run individual apt-get commands instead.
Code Block (bash):
# start with clean VM, I use root, you can use the recommended non-root account
sudo vi /etc/hosts
# add your hostname to ::1 and 127.0.0.1 or each sudo command will hang for up to 10 sec on DNS resolution especially on ubuntu 18.04
sudo apt-get update
sudo apt-get install openjdk-8-jdk
# skip the desktop install on a headless vm
sudo apt-get install ubuntu-desktop
#sudo apt-get install git
sudo apt-get install maven
#or
sudo wget http://apache.mirror.gtcomm.net/maven/maven-3/3.5.4/binaries/apache-maven-3.5.4-bin.tar.gz
sudo cp apache-maven-3.5.4-bin.tar.gz /opt
cd /opt
tar -xvf apache-maven-3.5.4-bin.tar.gz
sudo vi /etc/environment
MAVEN_OPTS="-Xms8192m -Djava.net.preferIPv4Stack=true"
# restart the terminal
ubuntu@ip-172-31-78-76:~$ mvn -version
Apache Maven 3.5.4 (1edded0938998edf8bf061f1ceb3cfdeccf443fe; 2018-06-17T18:33:14Z)
Maven home: /opt/apache-maven-3.5.4
Java version: 1.8.0_171, vendor: Oracle Corporation, runtime: /usr/lib/jvm/java-8-openjdk-amd64/jre
sudo vi ~/.ssh/config
Host *
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
# a couple options on copying the ssh key
# from another machine
root@ubuntu:~/_dev# cat ~/.ssh/id_rsa | ssh -i ~/.ssh/onap_rsa ubuntu@ons.onap.info 'cat >> .ssh/onap_rsa && echo "Key copied"'
Key copied
sudo chown ubuntu:ubuntu ~/.ssh/onap_rsa
# or
# scp onap gerrit cert into VM from host macbook
obrien:obrienlabs amdocs$ scp ~/.ssh/onap_rsa amdocs@192.168.211.129:~/
# move to root
sudo su -
root@obriensystemsu0:~# cp /home/amdocs/onap_rsa .
ls /home/amdocs/.m2
cp onap_rsa ~/.ssh/id_rsa
chmod 400 ~/.ssh/id_rsa
# move from root to ubuntu - if using non-root user
sudo chown ubuntu:ubuntu ~/.ssh/onap_rsa
# test your gerrit access
sudo git config --global --add gitreview.username michaelobrien
sudo git config --global user.email frank.obrien@amdocs.com
sudo git config --global user.name "Michael OBrien"
sudo git config --global gitreview.remote origin
sudo mkdir log-326-rancher-ver
cd log-326-rancher-ver/
sudo git clone ssh://michaelobrien@gerrit.onap.org:29418/logging-analytics
cd logging-analytics/
sudo vi deploy/rancher/oom_rancher_setup.sh
sudo git add deploy/rancher/oom_rancher_setup.sh
# setup git-review
sudo apt-get install git-review
sudo git config --global gitreview.remote origin
# upload a patch
sudo git commit -am "update rancher version to 1.6.18"
# 2nd line should be "Issue-ID: LOG-326"
sudo git commit -s --amend
sudo git review
Your change was committed before the commit hook was installed.
Amending the commit to add a gerrit change id.
remote: Processing changes: new: 1, refs: 1, done
remote: New Changes:
remote: https://gerrit.onap.org/r/55299 update rancher version to 1.6.18
remote:
To ssh://michaelobrien@gerrit.onap.org:29418/logging-analytics
* [new branch] HEAD -> refs/publish/master
# see
https://gerrit.onap.org/r/#/c/55299/
# if you get a corrupted FS type "fsck -y /dev/sda1"
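As a quick sanity check of the Gerrit ssh access before pushing, the standard Gerrit ssh interface can be queried directly (adjust the username to your own):
Code Block (bash):
# should print the gerrit server version if the key and account are set up
ssh -p 29418 michaelobrien@gerrit.onap.org gerrit version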
...
https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
Working with JSONPath
https://kubernetes.io/docs/reference/kubectl/jsonpath/
Fortunately we can script most of what we need to query from the state of our kubernetes deployment using JSONPath, and optionally post-process the results with jq.
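For example, a minimal jq post-processing step over the same output - a sketch, assuming jq is installed on the workstation:
Code Block (bash):
# list non-Running pods via jq instead of JSONPath
kubectl get pods --all-namespaces -o json | jq -r '.items[] | select(.status.phase!="Running") | .metadata.name'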
Get the full json output to design JSONPath queries
Jira: LOG-914
Use a different kubectl context
Code Block:
kubectl --kubeconfig ~/.kube/config2 get pods --all-namespaces -o json
# we are looking to shutdown a rogue pod that is not responding to the normal deletion commands - but it contains a generated name
onap onap-portal-portal-sdk-7c49c97955-smbws 0/2 Terminating 0 2d
ubuntu@onap-oom-obrien-rancher-e0:~$ kubectl get pods --field-selector=status.phase!=Running --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
onap onap-portal-portal-sdk-7c49c97955-smbws 0/2 Terminating 0 2d
#"spec": {"containers": [{},"name": "portal-sdk",
kubectl get pods --namespace onap -o jsonpath="{.items[*].spec.containers[0].name}"
portal-sdk
# so combining the two queries
kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}"
onap-portal-portal-sdk-7c49c97955-smbws
# and wrapping it with a delete command
export POD_NAME=$(kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}")
echo "$POD_NAME"
kubectl delete pods $POD_NAME --grace-period=0 --force -n onap
ubuntu@onap-oom-obrien-rancher-e0:~$ sudo ./term.sh
onap-portal-portal-sdk-7c49c97955-smbws
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "onap-portal-portal-sdk-7c49c97955-smbws" force deleted
...
Installing a pod
Code Block (bash):
# automatically via cd.sh in LOG-326
# get the dev.yaml and set any pods you want up to true as well as fill out the openstack parameters
sudo wget https://git.onap.org/oom/plain/kubernetes/onap/resources/environments/dev.yaml
sudo cp logging-analytics/deploy/cd.sh .
# or
# manually
cd oom/kubernetes/
sudo make clean
sudo make all
sudo make onap
sudo helm install local/onap -n onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set log.enabled=true
# adding another (so)
sudo helm upgrade local/onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set so.enabled=true --set log.enabled=true
Get the nodeport of a particular service
Code Block:
# human readable list
kubectl get services --all-namespaces | grep robot
# machine readable number - via JSONPath
kubectl get --namespace onap -o jsonpath="{.spec.ports[0].nodePort}" services robot
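The JSONPath form is useful for scripting - a minimal sketch, assuming the robot service answers HTTP on its nodeport:
Code Block (bash):
ROBOT_PORT=$(kubectl get --namespace onap -o jsonpath="{.spec.ports[0].nodePort}" services robot)
curl -s http://localhost:$ROBOT_PORT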
Test DNS URLS in the kubernetes ONAP namespace
Code Block:
# test urls in the robot container
wget http://pomba-sdcctxbuilder.onap:9530/sdccontextbuilder/health
wget http://pomba-networkdiscoveryctxbuilder.onap:9530/ndcontextbuilder/health
...
Override global policy
Code Block (bash):
# override global docker pull policy for a single component
# set in oom/kubernetes/onap/values.yaml
# use global.pullPolicy in your -f yaml or a --set
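For example, a --set override at upgrade time (a sketch - IfNotPresent is an illustrative value):
Code Block (bash):
sudo helm upgrade -i onap local/onap --namespace onap --set global.pullPolicy=IfNotPresent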
Exec into a container of a pod with multiple containers
Code Block:
# for
onap logdemonode-logdemonode-5c8bffb468-dhzcc 2/2 Running 0 1m
# use
kubectl exec -it logdemonode-logdemonode-5c8bffb468-dhzcc -n onap -c logdemonode bash
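The same -c flag selects the container when tailing logs:
Code Block (bash):
kubectl logs -f logdemonode-logdemonode-5c8bffb468-dhzcc -n onap -c logdemonode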
Push a file into a Kubernetes container/pod
Code Block:
# copy files from the vm to the robot container - to avoid building a new robot image
root@ubuntu:~/_dev/62405_logback/testsuite/robot/testsuites# kubectl cp health-check.robot onap-robot-7c84f54558-f8mw7: -n onap
root@ubuntu:~/_dev/62405_logback/testsuite/robot/testsuites# kubectl cp ../resources/pomba_interface.robot onap-robot-7c84f54558-f8mw7: -n onap
# move the files in the robot container to the proper dir
root@onap-robot-7c84f54558-f8mw7:/# cp health-check.robot /var/opt/OpenECOMP_ETE/robot/testsuites/
root@onap-robot-7c84f54558-f8mw7:/# ls
bin boot dev etc health-check.robot home lib lib64 media mnt opt pomba_interface.robot proc root run sbin share srv sys tmp usr var
root@onap-robot-7c84f54558-f8mw7:/# cp pomba_interface.robot /var/opt/OpenECOMP_ETE/robot/resources/
# retest health
root@ubuntu:~/_dev/62405_logback/oom/kubernetes/robot# ./ete-k8s.sh onap health
# and directly in the robot container
wget http://pomba-sdcctxbuilder.onap:9530/sdccontextbuilder/health
wget http://pomba-networkdiscoveryctxbuilder.onap:9530/ndcontextbuilder/health
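kubectl cp also works in the reverse direction - for example, to pull a file back out of the robot container for inspection (a sketch reusing the pod name above):
Code Block (bash):
kubectl cp onap-robot-7c84f54558-f8mw7:var/opt/OpenECOMP_ETE/robot/testsuites/health-check.robot ./health-check.robot -n onap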
Restarting a container or pod
If you change configuration, such as the logback.xml in a pod, or would like to restart an entire pod (such as the log and portal pods):
Code Block (bash):
cd oom/kubernetes
# do a make if anything is modified in your charts
sudo make all
#sudo make onap
# bounce the log pod - disable then re-enable it
ubuntu@ip-172-31-19-23:~/oom/kubernetes$ sudo helm upgrade -i onap local/onap --namespace onap --set log.enabled=false
# wait and check in another terminal for all containers to terminate
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
onap onap-log-elasticsearch-7557486bc4-5mng9 0/1 CrashLoopBackOff 9 29m
onap onap-log-kibana-fc88b6b79-nt7sd 1/1 Running 0 35m
onap onap-log-logstash-c5z4d 1/1 Terminating 0 4h
onap onap-log-logstash-ftxfz 1/1 Terminating 0 4h
onap onap-log-logstash-gl59m 1/1 Terminating 0 4h
onap onap-log-logstash-nxsf8 1/1 Terminating 0 4h
onap onap-log-logstash-w8q8m 1/1 Terminating 0 4h
sudo helm upgrade -i onap local/onap --namespace onap --set log.enabled=true
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
onap onap-log-elasticsearch-7557486bc4-2jd65 0/1 Init:0/1 0 31s
onap onap-log-kibana-fc88b6b79-5xqg4 0/1 Init:0/1 0 31s
onap onap-log-logstash-5vq82 0/1 Init:0/1 0 31s
onap onap-log-logstash-gvr9z 0/1 Init:0/1 0 31s
onap onap-log-logstash-qqzq5 0/1 Init:0/1 0 31s
onap onap-log-logstash-vbp2x 0/1 Init:0/1 0 31s
onap onap-log-logstash-wr9rd 0/1 Init:0/1 0 31s
# restart the portal pod after editing its logback.xml
sudo helm upgrade -i onap local/onap --namespace onap --set portal.enabled=false
sudo vi portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml
sudo make portal
sudo make onap
sudo helm upgrade -i onap local/onap --namespace onap --set portal.enabled=true
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-portal
onap onap-portal-app-8486dc7ff8-nbps7 0/2 Init:0/1 0 9m
onap onap-portal-cassandra-8588fbd698-4wthv 1/1 Running 0 9m
onap onap-portal-db-7d6b95cd94-9x4kf 0/1 Running 0 9m
onap onap-portal-db-config-dpqkq 0/2 Init:0/1 0 9m
onap onap-portal-sdk-77cd558c98-5255r 0/2 Init:0/1 0 9m
onap onap-portal-widget-6469f4bc56-g8s62 0/1 Init:0/1 0 9m
onap onap-portal-zookeeper-5d8c598c4c-czpnz 1/1 Running 0 9m
Adding user kubectl accounts
Normally you don't use the admin account directly when working with particular namespaces. Below are the details on how to create a user token and the appropriate role bindings.
Code Block:
# TODO: create a script out of this
# create a namespace
# https://kubernetes.io/docs/tasks/administer-cluster/namespaces-walkthrough/#create-new-namespaces
vi mobrien_namespace.yaml
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "mobrien",
"labels": {
"name": "mobrien"
}
}
}
kubectl create -f mobrien_namespace.yaml
# or
kubectl --kubeconfig ~/.kube/admin create ns mobrien
namespace "mobrien" created
# service account
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create sa mobrien
serviceaccount "mobrien" created
# rolebinding mobrien
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-mobrien-privilegedpsp --clusterrole=privilegedpsp --serviceaccount=mobrien:mobrien
rolebinding "mobrien-mobrien-privilegedpsp" created
# rolebinding default
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-default-privilegedpsp --clusterrole=privilegedpsp --serviceaccount=mobrien:default
rolebinding "mobrien-default-privilegedpsp" created
# rolebinding admin
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-mobrien-admin --clusterrole=admin --serviceaccount=mobrien:mobrien
rolebinding "mobrien-mobrien-admin" created
# rolebinding persistent-volume-role
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-mobrien-persistent-volume-role --clusterrole=persistent-volume-role --serviceaccount=mobrien:mobrien
clusterrolebinding "mobrien-mobrien-persistent-volume-role" created
# rolebinding default-persistent-volume-role
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-default-persistent-volume-role --clusterrole=persistent-volume-role --serviceaccount=mobrien:default
clusterrolebinding "mobrien-default-persistent-volume-role" created
# rolebinding helm-pod-list
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-mobrien-helm-pod-list --clusterrole=helm-pod-list --serviceaccount=mobrien:mobrien
clusterrolebinding "mobrien-mobrien-helm-pod-list" created
# rolebinding default-helm-pod-list
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-default-helm-pod-list --clusterrole=helm-pod-list --serviceaccount=mobrien:default
clusterrolebinding "mobrien-default-helm-pod-list" created
# get the serviceAccount and extract the token to place into a config yaml
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien get sa
NAME SECRETS AGE
default 1 20m
mobrien 1 18m
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe serviceaccount mobrien
Name: mobrien
Namespace: mobrien
Labels: <none>
Annotations: <none>
Image pull secrets: <none>
Mountable secrets: mobrien-token-v9z5j
Tokens: mobrien-token-v9z5j
TOKEN=$(kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe secrets "$(kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe serviceaccount mobrien | grep -i Tokens | awk '{print $2}')" | grep token: | awk '{print $2}')
echo $TOKEN
eyJO....b3VudC
# put this in your ~/.kube/config and edit the namespace
see also https://stackoverflow.com/questions/44948483/create-user-in-kubernetes-for-kubectl
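A minimal sketch of the resulting ~/.kube/config entry - the cluster server URL and cluster name are placeholders for your environment, and the token is the value extracted above:
Code Block (yaml):
apiVersion: v1
kind: Config
clusters:
- name: local                           # placeholder cluster name
  cluster:
    server: https://your-k8s-api-server # placeholder URL
contexts:
- name: mobrien
  context:
    cluster: local
    user: mobrien
    namespace: mobrien
current-context: mobrien
users:
- name: mobrien
  user:
    token: eyJO....b3VudC               # the TOKEN value extracted above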
Helm on Rancher unauthorized
Cycle the GitHub RBAC off/on if you get any security issues running helm commands.
Code Block:
ubuntu@a-ons1-master:~$ watch kubectl get pods --all-namespaces
ubuntu@a-ons1-master:~$ sudo helm list
Error: Unauthorized
ubuntu@a-ons1-master:~$ sudo helm list
NAME REVISION UPDATED STATUS CHART NAMESPACE
onap 4 Thu Mar 7 13:03:29 2019 DEPLOYED onap-3.0.0 onap
onap-dmaap 1 Thu Mar 7 13:03:32 2019 DEPLOYED dmaap-3.0.0 onap
Downgrade docker if required
Code Block:
sudo apt-get autoremove -y docker-engine
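To then install a specific docker version supported by Rancher, the version-pinned Rancher install script is one option (a sketch - verify the version your Rancher release expects):
Code Block (bash):
# install docker 17.03 via the rancher helper script
curl https://releases.rancher.com/install-docker/17.03.sh | sh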
Change max-pods from default 110 pod limit
Rancher ships with a 110 pod limit - you can override this on the kubernetes template for 1.10
https://lists.onap.org/g/onap-discuss/topic/oom_110_kubernetes_pod/25213556?p=,,,20,0,0,0::recentpostdate%2Fsticky,,,20,2,0,25213556
Manual procedure: change the kubernetes template (1pt2) before using it to create an environment (1a7)
add --max-pods=500 to the "Additional Kubelet Flags" box on the v1.10.13 version of the kubernetes template from the "Manage Environments" dropdown on the left of the 8880 rancher console.
View file: 20180905_rancher_increase_110_pod_limit.mp4
Jira: OOM-1137
Or capture the output of the REST PUT call - and add around line 111 of the script https://git.onap.org/logging-analytics/tree/deploy/rancher/oom_rancher_setup.sh#n111
Automated - ongoing
Code Block:
ubuntu@ip-172-31-27-183:~$ curl 'http://127.0.0.1:8880/v2-beta/projecttemplates/1pt2' --data-binary '{"id":"1pt2","type":"projectTemplate","baseType":"projectTemplate","name":"Kubernetes","state":"active","accountId":null,"created":"2018-09-05T14:12:24Z","createdTS":1536156744000,"data":{"fields":{"stacks":[{"name":"healthcheck","templateId":"library:infra*healthcheck"},{"type":"catalogTemplate","answers":{"CONSTRAINT_TYPE":"none","CLOUD_PROVIDER":"rancher","AZURE_CLOUD":"AzurePublicCloud","AZURE_TENANT_ID":"","AZURE_CLIENT_ID":"","AZURE_CLIENT_SECRET":"","AZURE_SEC_GROUP":"","RBAC":false,"REGISTRY":"","BASE_IMAGE_NAMESPACE":"","POD_INFRA_CONTAINER_IMAGE":"rancher/pause-amd64:3.0","HTTP_PROXY":"","NO_PROXY":"rancher.internal,cluster.local,rancher-metadata,rancher-kubernetes-auth,kubernetes,169.254.169.254,169.254.169.250,10.42.0.0/16,10.43.0.0/16","ENABLE_ADDONS":true,"ENABLE_RANCHER_INGRESS_CONTROLLER":true,"RANCHER_LB_SEPARATOR":"rancherlb","DNS_REPLICAS":"1","ADDITIONAL_KUBELET_FLAGS":"--max-pods=600","FAIL_ON_SWAP":"false","ADDONS_LOG_VERBOSITY_LEVEL":"2","AUDIT_LOGS":false,"ADMISSION_CONTROLLERS":"NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,ResourceQuota","SERVICE_CLUSTER_CIDR":"10.43.0.0/16","DNS_CLUSTER_IP":"10.43.0.10","KUBEAPI_CLUSTER_IP":"10.43.0.1","KUBERNETES_CIPHER_SUITES":"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305","DASHBOARD_CPU_LIMIT":"100m","DASHBOARD_MEMORY_LIMIT":"300Mi","INFLUXDB_HOST_PATH":"","EMBEDDED_BACKUPS":true,"BACKUP_PERIOD":"15m0s","BACKUP_RETENTION":"24h","ETCD_HEARTBEAT_INTERVAL":"500","ETCD_ELECTION_TIMEOUT":"5000"},"name":"kubernetes","templateVersionId":"library:infra*k8s:47"},{"type":"catalogTemplate","name":"network-services","templateId":"library:infra*network-services"},{"type":"catalogTemplate","name":"ipsec","templateId":"library:infra*ipsec"}],"transitioning":"no","transitioningMessage":null,"transitioningProgress":null,"uuid":null}' --compressed
{"id":"9107b9ce-0b61-4c22-bc52-f147babb0ba7","type":"error","links":{},"actions":{},"status":405,"code":"Method not allowed","message":"Method not allowed","detail":null,"baseType":"error"}
Kubernetes inter pod communication - using DNS service addresses
Try to use the service name (with or without the namespace) - not the service IP address - for inter-namespace communication; nodeports or ingress are only required from outside the cluster.
For example log-ls:5044 or log-ls.onap:5044
Code Block:
# example curl call between AAI and SDC
amdocs@obriensystemsu0:~$ kubectl exec -it -n onap onap-aai-aai-graphadmin-7bd5fc9bd-l4v4z bash
Defaulting container name to aai-graphadmin.
root@aai-graphadmin:/opt/app/aai-graphadmin# curl http://sdc-fe:8181
<HTML><HEAD><TITLE>Error 404 - Not Found</TITLE><BODY><H2>Error 404 - Not Found.</H2>
</ul><hr><a href="http://eclipse.org/jetty"><img border=0 src="/favicon.ico"/></a> <a href="http://eclipse.org/jetty">Powered by Jetty:// 9.4.12.v20180830</a><hr/>
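To verify the DNS names themselves from inside a pod (a sketch - assumes nslookup and curl are present in the image):
Code Block (bash):
kubectl exec -it -n onap onap-robot-7c84f54558-f8mw7 -- nslookup log-ls.onap
kubectl exec -it -n onap onap-robot-7c84f54558-f8mw7 -- curl -s http://log-ls-http.onap:9600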
Results
Single AWS 244G 32vCore VM with 110 pod limit workaround - 164 pods (including both secondary DCAEGEN2 orchestrations at 30 and 55 min) - most of the remaining 8 container failures are known/in-progress issues.
Code Block:
ubuntu@ip-172-31-20-218:~$ free
total used free shared buff/cache available
Mem: 251754696 111586672 45000724 193628 95167300 137158588
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep onap | wc -l
164
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep onap | grep -E '1/1|2/2' | wc -l
155
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l
8
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2'
onap dep-dcae-ves-collector-59d4ff58f7-94rpq 1/2 Running 0 4m
onap onap-aai-champ-68ff644d85-rv7tr 0/1 Running 0 59m
onap onap-aai-gizmo-856f86d664-q5pvg 1/2 CrashLoopBackOff 10 59m
onap onap-oof-85864d6586-zcsz5 0/1 ImagePullBackOff 0 59m
onap onap-pomba-kibana-d76b6dd4c-sfbl6 0/1 Init:CrashLoopBackOff 8 59m
onap onap-pomba-networkdiscovery-85d76975b7-mfk92 1/2 CrashLoopBackOff 11 59m
onap onap-pomba-networkdiscoveryctxbuilder-c89786dfc-qnlx9 1/2 CrashLoopBackOff 10 59m
onap onap-vid-84c88db589-8cpgr 1/2 CrashLoopBackOff 9 59m
Operations
Get failed/pending containers
Code Block:
kubectl get pods --all-namespaces | grep -E "0/|1/2" | wc -l
Code Block:
kubectl cluster-info
# get pods/containers
kubectl get pods --all-namespaces
# get port mappings
kubectl get services --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE
default nginx-1389790254-lgkz3 1/1 Running 1 5d
kube-system heapster-4285517626-x080g 1/1 Running 1 6d
kube-system kube-dns-638003847-tst97 3/3 Running 3 6d
kube-system kubernetes-dashboard-716739405-fnn3g 1/1 Running 2 6d
kube-system monitoring-grafana-2360823841-hr824 1/1 Running 1 6d
kube-system monitoring-influxdb-2323019309-k7h1t 1/1 Running 1 6d
kube-system tiller-deploy-737598192-x9wh5 1/1 Running 1 6d
# ssh into a pod
kubectl -n default exec -it nginx-1389790254-lgkz3 /bin/bash
# get logs
kubectl -n default logs -f nginx-1389790254-lgkz3
Exec
kubectl -n onap-aai exec -it aai-resources-1039856271-d9bvq bash
Bounce/Fix a failed container
Periodically one of the higher containers in a dependency tree will not get restarted in time to pick up its running child containers - usually this is the kibana container. Fix this (or any) container by deleting the pod in question - kubernetes will bring another one up.
Code Block:
root@a-onap-auto-20180412-ref:~# kubectl get services --all-namespaces | grep log
onap dev-vfc-catalog ClusterIP 10.43.210.8 <none> 8806/TCP 5d
onap log-es NodePort 10.43.77.87 <none> 9200:30254/TCP 5d
onap log-es-tcp ClusterIP 10.43.159.93 <none> 9300/TCP 5d
onap log-kibana NodePort 10.43.41.102 <none> 5601:30253/TCP 5d
onap log-ls NodePort 10.43.180.165 <none> 5044:30255/TCP 5d
onap log-ls-http ClusterIP 10.43.13.180 <none> 9600/TCP 5d
root@a-onap-auto-20180412-ref:~# kubectl get pods --all-namespaces | grep log
onap dev-log-elasticsearch-66cdc4f855-wmpkz 1/1 Running 0 5d
onap dev-log-kibana-5b6f86bcb4-drpzq 0/1 Running 1076 5d
onap dev-log-logstash-6d9fdccdb6-ngq2f 1/1 Running 0 5d
onap dev-vfc-catalog-7d89bc8b9d-vxk74 2/2 Running 0 5d
root@a-onap-auto-20180412-ref:~# kubectl delete pod dev-log-kibana-5b6f86bcb4-drpzq -n onap
pod "dev-log-kibana-5b6f86bcb4-drpzq" deleted
root@a-onap-auto-20180412-ref:~# kubectl get pods --all-namespaces | grep log
onap dev-log-elasticsearch-66cdc4f855-wmpkz 1/1 Running 0 5d
onap dev-log-kibana-5b6f86bcb4-drpzq 0/1 Terminating 1076 5d
onap dev-log-kibana-5b6f86bcb4-gpn2m 0/1 Pending 0 12s
onap dev-log-logstash-6d9fdccdb6-ngq2f 1/1 Running 0 5d
onap dev-vfc-catalog-7d89bc8b9d-vxk74 2/2 Running 0 5d
A helm namespace delete, a kubectl delete, or a helm purge may not remove everything if there are hanging PVs - use:
Code Block:
# after a kubectl delete namespace onap
sudo helm delete --purge onap
kubectl delete pods <pod> --grace-period=0 --force -n onap
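Any hanging PVs/PVCs can then be listed and removed explicitly (a sketch - note that PVs are cluster scoped, so no -n flag on the last delete):
Code Block (bash):
kubectl get pvc -n onap
kubectl delete pvc --all -n onap
# remove any onap PVs left behind
kubectl delete pv $(kubectl get pv | grep onap | awk '{print $1}')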
Reboot VMs hosting a Deployment
aka https://lists.onap.org/g/onap-discuss/topic/procedure_to_shut_down_and/29540879?p=,,,20,0,0,0::recentpostdate%2Fsticky,,,20,2,0,29540879
in progress
Code Block:
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | wc -l
234
# master 20190125
ubuntu@a-ld0:~$ kubectl scale --replicas=0 deployments --all -n onap
deployment.extensions/onap-aaf-aaf-cm scaled
deployment.extensions/onap-aaf-aaf-cs scaled
deployment.extensions/onap-aaf-aaf-fs scaled
deployment.extensions/onap-aaf-aaf-gui scaled
deployment.extensions/onap-aaf-aaf-hello scaled
deployment.extensions/onap-aaf-aaf-locate scaled
deployment.extensions/onap-aaf-aaf-oauth scaled
deployment.extensions/onap-aaf-aaf-service scaled
deployment.extensions/onap-aaf-aaf-sms scaled
deployment.extensions/onap-aai-aai scaled
deployment.extensions/onap-aai-aai-babel scaled
deployment.extensions/onap-aai-aai-champ scaled
deployment.extensions/onap-aai-aai-data-router scaled
deployment.extensions/onap-aai-aai-elasticsearch scaled
deployment.extensions/onap-aai-aai-gizmo scaled
deployment.extensions/onap-aai-aai-graphadmin scaled
deployment.extensions/onap-aai-aai-modelloader scaled
deployment.extensions/onap-aai-aai-resources scaled
deployment.extensions/onap-aai-aai-search-data scaled
deployment.extensions/onap-aai-aai-sparky-be scaled
deployment.extensions/onap-aai-aai-spike scaled
deployment.extensions/onap-aai-aai-traversal scaled
deployment.extensions/onap-appc-appc-ansible-server scaled
deployment.extensions/onap-appc-appc-cdt scaled
deployment.extensions/onap-appc-appc-dgbuilder scaled
deployment.extensions/onap-clamp-clamp scaled
deployment.extensions/onap-clamp-clamp-dash-es scaled
deployment.extensions/onap-clamp-clamp-dash-kibana scaled
deployment.extensions/onap-clamp-clamp-dash-logstash scaled
deployment.extensions/onap-clamp-clampdb scaled
deployment.extensions/onap-cli-cli scaled
deployment.extensions/onap-consul-consul scaled
deployment.extensions/onap-contrib-netbox-app scaled
deployment.extensions/onap-contrib-netbox-nginx scaled
deployment.extensions/onap-contrib-netbox-postgres scaled
deployment.extensions/onap-dcaegen2-dcae-bootstrap scaled
deployment.extensions/onap-dcaegen2-dcae-cloudify-manager scaled
deployment.extensions/onap-dcaegen2-dcae-healthcheck scaled
deployment.extensions/onap-dcaegen2-dcae-pgpool scaled
deployment.extensions/onap-dmaap-dbc-pgpool scaled
deployment.extensions/onap-dmaap-dmaap-bus-controller scaled
deployment.extensions/onap-dmaap-dmaap-dr-db scaled
deployment.extensions/onap-dmaap-dmaap-dr-node scaled
deployment.extensions/onap-dmaap-dmaap-dr-prov scaled
deployment.extensions/onap-esr-esr-gui scaled
deployment.extensions/onap-esr-esr-server scaled
deployment.extensions/onap-log-log-elasticsearch scaled
deployment.extensions/onap-log-log-kibana scaled
deployment.extensions/onap-log-log-logstash scaled
deployment.extensions/onap-msb-kube2msb scaled
deployment.extensions/onap-msb-msb-consul scaled
deployment.extensions/onap-msb-msb-discovery scaled
deployment.extensions/onap-msb-msb-eag scaled
deployment.extensions/onap-msb-msb-iag scaled
deployment.extensions/onap-multicloud-multicloud scaled
deployment.extensions/onap-multicloud-multicloud-azure scaled
deployment.extensions/onap-multicloud-multicloud-ocata scaled
deployment.extensions/onap-multicloud-multicloud-pike scaled
deployment.extensions/onap-multicloud-multicloud-vio scaled
deployment.extensions/onap-multicloud-multicloud-windriver scaled
deployment.extensions/onap-oof-music-tomcat scaled
deployment.extensions/onap-oof-oof scaled
deployment.extensions/onap-oof-oof-cmso-service scaled
deployment.extensions/onap-oof-oof-has-api scaled
deployment.extensions/onap-oof-oof-has-controller scaled
deployment.extensions/onap-oof-oof-has-data scaled
deployment.extensions/onap-oof-oof-has-reservation scaled
deployment.extensions/onap-oof-oof-has-solver scaled
deployment.extensions/onap-policy-brmsgw scaled
deployment.extensions/onap-policy-nexus scaled
deployment.extensions/onap-policy-pap scaled
deployment.extensions/onap-policy-policy-distribution scaled
deployment.extensions/onap-policy-policydb scaled
deployment.extensions/onap-pomba-pomba-aaictxbuilder scaled
deployment.extensions/onap-pomba-pomba-contextaggregator scaled
deployment.extensions/onap-pomba-pomba-data-router scaled
deployment.extensions/onap-pomba-pomba-elasticsearch scaled
deployment.extensions/onap-pomba-pomba-kibana scaled
deployment.extensions/onap-pomba-pomba-networkdiscovery scaled
deployment.extensions/onap-pomba-pomba-networkdiscoveryctxbuilder scaled
deployment.extensions/onap-pomba-pomba-sdcctxbuilder scaled
deployment.extensions/onap-pomba-pomba-sdncctxbuilder scaled
deployment.extensions/onap-pomba-pomba-search-data scaled
deployment.extensions/onap-pomba-pomba-servicedecomposition scaled
deployment.extensions/onap-pomba-pomba-validation-service scaled
deployment.extensions/onap-portal-portal-app scaled
deployment.extensions/onap-portal-portal-cassandra scaled
deployment.extensions/onap-portal-portal-db scaled
deployment.extensions/onap-portal-portal-sdk scaled
deployment.extensions/onap-portal-portal-widget scaled
deployment.extensions/onap-portal-portal-zookeeper scaled
deployment.extensions/onap-robot-robot scaled
deployment.extensions/onap-sdc-sdc-be scaled
deployment.extensions/onap-sdc-sdc-cs scaled
deployment.extensions/onap-sdc-sdc-dcae-be scaled
deployment.extensions/onap-sdc-sdc-dcae-dt scaled
deployment.extensions/onap-sdc-sdc-dcae-fe scaled
deployment.extensions/onap-sdc-sdc-dcae-tosca-lab scaled
deployment.extensions/onap-sdc-sdc-es scaled
deployment.extensions/onap-sdc-sdc-fe scaled
deployment.extensions/onap-sdc-sdc-kb scaled
deployment.extensions/onap-sdc-sdc-onboarding-be scaled
deployment.extensions/onap-sdc-sdc-wfd-be scaled
deployment.extensions/onap-sdc-sdc-wfd-fe scaled
deployment.extensions/onap-sdnc-controller-blueprints scaled
deployment.extensions/onap-sdnc-network-name-gen scaled
deployment.extensions/onap-sdnc-sdnc-ansible-server scaled
deployment.extensions/onap-sdnc-sdnc-dgbuilder scaled
deployment.extensions/onap-sdnc-sdnc-dmaap-listener scaled
deployment.extensions/onap-sdnc-sdnc-portal scaled
deployment.extensions/onap-sdnc-sdnc-ueb-listener scaled
deployment.extensions/onap-sniro-emulator-sniro-emulator scaled
deployment.extensions/onap-so-so scaled
deployment.extensions/onap-so-so-bpmn-infra scaled
deployment.extensions/onap-so-so-catalog-db-adapter scaled
deployment.extensions/onap-so-so-mariadb scaled
deployment.extensions/onap-so-so-monitoring scaled
deployment.extensions/onap-so-so-openstack-adapter scaled
deployment.extensions/onap-so-so-request-db-adapter scaled
deployment.extensions/onap-so-so-sdc-controller scaled
deployment.extensions/onap-so-so-sdnc-adapter scaled
deployment.extensions/onap-so-so-vfc-adapter scaled
deployment.extensions/onap-uui-uui scaled
deployment.extensions/onap-uui-uui-server scaled
deployment.extensions/onap-vfc-vfc-catalog scaled
deployment.extensions/onap-vfc-vfc-db scaled
deployment.extensions/onap-vfc-vfc-ems-driver scaled
deployment.extensions/onap-vfc-vfc-generic-vnfm-driver scaled
deployment.extensions/onap-vfc-vfc-huawei-vnfm-driver scaled
deployment.extensions/onap-vfc-vfc-juju-vnfm-driver scaled
deployment.extensions/onap-vfc-vfc-multivim-proxy scaled
deployment.extensions/onap-vfc-vfc-nokia-v2vnfm-driver scaled
deployment.extensions/onap-vfc-vfc-nokia-vnfm-driver scaled
deployment.extensions/onap-vfc-vfc-nslcm scaled
deployment.extensions/onap-vfc-vfc-resmgr scaled
deployment.extensions/onap-vfc-vfc-vnflcm scaled
deployment.extensions/onap-vfc-vfc-vnfmgr scaled
deployment.extensions/onap-vfc-vfc-vnfres scaled
deployment.extensions/onap-vfc-vfc-workflow scaled
deployment.extensions/onap-vfc-vfc-workflow-engine scaled
deployment.extensions/onap-vfc-vfc-zte-sdnc-driver scaled
deployment.extensions/onap-vfc-vfc-zte-vnfm-driver scaled
deployment.extensions/onap-vid-vid scaled
deployment.extensions/onap-vnfsdk-vnfsdk scaled
deployment.extensions/onap-vnfsdk-vnfsdk-pgpool scaled
deployment.extensions/onap-vvp-vvp scaled
deployment.extensions/onap-vvp-vvp-ci-uwsgi scaled
deployment.extensions/onap-vvp-vvp-cms-uwsgi scaled
deployment.extensions/onap-vvp-vvp-em-uwsgi scaled
deployment.extensions/onap-vvp-vvp-ext-haproxy scaled
deployment.extensions/onap-vvp-vvp-gitlab scaled
deployment.extensions/onap-vvp-vvp-imagescanner scaled
deployment.extensions/onap-vvp-vvp-int-haproxy scaled
deployment.extensions/onap-vvp-vvp-jenkins scaled
deployment.extensions/onap-vvp-vvp-postgres scaled
deployment.extensions/onap-vvp-vvp-redis scaled
ubuntu@a-ld0:~$ kubectl scale --replicas=0 statefulsets --all -n onap
statefulset.apps/onap-aaf-aaf-sms-quorumclient scaled
statefulset.apps/onap-aaf-aaf-sms-vault scaled
statefulset.apps/onap-aai-aai-cassandra scaled
statefulset.apps/onap-appc-appc scaled
statefulset.apps/onap-appc-appc-db scaled
statefulset.apps/onap-consul-consul-server scaled
statefulset.apps/onap-dcaegen2-dcae-db scaled
statefulset.apps/onap-dcaegen2-dcae-redis scaled
statefulset.apps/onap-dmaap-dbc-pg scaled
statefulset.apps/onap-dmaap-message-router scaled
statefulset.apps/onap-dmaap-message-router-kafka scaled
statefulset.apps/onap-dmaap-message-router-zookeeper scaled
statefulset.apps/onap-oof-cmso-db scaled
statefulset.apps/onap-oof-music-cassandra scaled
statefulset.apps/onap-oof-zookeeper scaled
statefulset.apps/onap-policy-drools scaled
statefulset.apps/onap-policy-pdp scaled
statefulset.apps/onap-policy-policy-apex-pdp scaled
statefulset.apps/onap-sdnc-controller-blueprints-db scaled
statefulset.apps/onap-sdnc-nengdb scaled
statefulset.apps/onap-sdnc-sdnc scaled
statefulset.apps/onap-sdnc-sdnc-db scaled
statefulset.apps/onap-vid-vid-mariadb-galera scaled
statefulset.apps/onap-vnfsdk-vnfsdk-postgres scaled
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | grep Terminating | wc -l
179
# 4 min later
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | grep Terminating | wc -l
118
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | wc -l
135
# completed/failed jobs are left
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | wc -l
27
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | grep Terminating | wc -l
0
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system heapster-7b48b696fc-99cd6 1/1 Running 0 2d
kube-system kube-dns-6655f78c68-k4dh4 3/3 Running 0 2d
kube-system kubernetes-dashboard-6f54f7c4b-fhqmf 1/1 Running 0 2d
kube-system monitoring-grafana-7877679464-cscg4 1/1 Running 0 2d
kube-system monitoring-influxdb-64664c6cf5-wmw8w 1/1 Running 0 2d
kube-system tiller-deploy-78db58d887-9qlwh 1/1 Running 0 2d
onap onap-aaf-aaf-sms-preload-k7mx6 0/1 Completed 0 2d
onap onap-aaf-aaf-sshsm-distcenter-lk5st 0/1 Completed 0 2d
onap onap-aaf-aaf-sshsm-testca-lg2g6 0/1 Completed 0 2d
onap onap-aai-aai-graphadmin-create-db-schema-7qhcr 0/1 Completed 0 2d
onap onap-aai-aai-traversal-update-query-data-n6dt6 0/1 Init:0/1 0 2d
onap onap-contrib-netbox-app-provisioning-7mb4f 0/1 Completed 0 2d
onap onap-contrib-netbox-app-provisioning-wbvpv 0/1 Error 0 2d
onap onap-oof-music-cassandra-job-config-wvwgv 0/1 Completed 0 2d
onap onap-oof-oof-has-healthcheck-s44jv 0/1 Completed 0 2d
onap onap-oof-oof-has-onboard-kcfb6 0/1 Completed 0 2d
onap onap-portal-portal-db-config-vt848 0/2 Completed 0 2d
onap onap-sdc-sdc-be-config-backend-cktdp 0/1 Completed 0 2d
onap onap-sdc-sdc-cs-config-cassandra-t5lt7 0/1 Completed 0 2d
onap onap-sdc-sdc-dcae-be-tools-8pkqz 0/1 Completed 0 2d
onap onap-sdc-sdc-dcae-be-tools-lrcwk 0/1 Init:Error 0 2d
onap onap-sdc-sdc-es-config-elasticsearch-9zrdw 0/1 Completed 0 2d
onap onap-sdc-sdc-onboarding-be-cassandra-init-8klpv 0/1 Completed 0 2d
onap onap-sdc-sdc-wfd-be-workflow-init-b4j4v 0/1 Completed 0 2d
onap onap-vid-vid-galera-config-d4srr 0/1 Completed 0 2d
onap onap-vnfsdk-vnfsdk-init-postgres-bm668 0/1 Completed 0 2d
# deployments are still there
# reboot server
ubuntu@a-ld0:~$ sudo helm list
NAME REVISION UPDATED STATUS CHART NAMESPACE
onap 28 Thu Jan 24 18:48:42 2019 DEPLOYED onap-3.0.0 onap
onap-aaf 23 Thu Jan 24 18:48:45 2019 DEPLOYED aaf-3.0.0 onap
onap-aai 21 Thu Jan 24 18:48:51 2019 DEPLOYED aai-3.0.0 onap
onap-appc 7 Thu Jan 24 18:49:02 2019 DEPLOYED appc-3.0.0 onap
onap-clamp 6 Thu Jan 24 18:49:06 2019 DEPLOYED clamp-3.0.0 onap
onap-cli 5 Thu Jan 24 18:49:09 2019 DEPLOYED cli-3.0.0 onap
onap-consul 27 Thu Jan 24 18:49:11 2019 DEPLOYED consul-3.0.0 onap
onap-contrib 2 Thu Jan 24 18:49:14 2019 DEPLOYED contrib-3.0.0 onap
onap-dcaegen2 24 Thu Jan 24 18:49:18 2019 DEPLOYED dcaegen2-3.0.0 onap
onap-dmaap 25 Thu Jan 24 18:49:22 2019 DEPLOYED dmaap-3.0.0 onap
onap-esr 20 Thu Jan 24 18:49:27 2019 DEPLOYED esr-3.0.0 onap
onap-log 11 Thu Jan 24 18:49:31 2019 DEPLOYED log-3.0.0 onap
onap-msb 26 Thu Jan 24 18:49:34 2019 DEPLOYED msb-3.0.0 onap
onap-multicloud 19 Thu Jan 24 18:49:37 2019 DEPLOYED multicloud-3.0.0 onap
onap-oof 18 Thu Jan 24 18:49:44 2019 DEPLOYED oof-3.0.0 onap
onap-policy 13 Thu Jan 24 18:49:52 2019 DEPLOYED policy-3.0.0 onap
onap-pomba 4 Thu Jan 24 18:49:56 2019 DEPLOYED pomba-3.0.0 onap
onap-portal 12 Thu Jan 24 18:50:03 2019 DEPLOYED portal-3.0.0 onap
onap-robot 22 Thu Jan 24 18:50:08 2019 DEPLOYED robot-3.0.0 onap
onap-sdc 16 Thu Jan 24 18:50:11 2019 DEPLOYED sdc-3.0.0 onap
onap-sdnc 15 Thu Jan 24 18:50:17 2019 DEPLOYED sdnc-3.0.0 onap
onap-sniro-emulator 1 Thu Jan 24 18:50:21 2019 DEPLOYED sniro-emulator-3.0.0 onap
onap-so 17 Thu Jan 24 18:50:24 2019 DEPLOYED so-3.0.0 onap
onap-uui 9 Thu Jan 24 18:50:30 2019 DEPLOYED uui-3.0.0 onap
onap-vfc 10 Thu Jan 24 18:50:33 2019 DEPLOYED vfc-3.0.0 onap
onap-vid 14 Thu Jan 24 18:50:38 2019 DEPLOYED vid-3.0.0 onap
onap-vnfsdk 8 Thu Jan 24 18:50:41 2019 DEPLOYED vnfsdk-3.0.0 onap
onap-vvp 3 Thu Jan 24 18:50:44 2019 DEPLOYED vvp-3.0.0 onap
sudo reboot now
ubuntu@a-ld0:~$ sudo docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f61dc9902248 rancher/agent:v1.2.11 "/run.sh run" 2 days ago Up 30 seconds
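To bring the deployment back up after the reboot, the scale commands can be reversed (a sketch - this restores a single replica per workload; a subsequent helm upgrade will restore the chart replica counts):
Code Block (bash):
kubectl scale --replicas=1 deployments --all -n onap
kubectl scale --replicas=1 statefulsets --all -n onap
# watch the pods come back
kubectl get pods --all-namespaces | grep -v Running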
deployment.extensions/onap-vfc-vfc-nslcm scaled
deployment.extensions/onap-vfc-vfc-resmgr scaled
deployment.extensions/onap-vfc-vfc-vnflcm scaled
deployment.extensions/onap-vfc-vfc-vnfmgr scaled
deployment.extensions/onap-vfc-vfc-vnfres scaled
deployment.extensions/onap-vfc-vfc-workflow scaled
deployment.extensions/onap-vfc-vfc-workflow-engine scaled
deployment.extensions/onap-vfc-vfc-zte-sdnc-driver scaled
deployment.extensions/onap-vfc-vfc-zte-vnfm-driver scaled
deployment.extensions/onap-vid-vid scaled
deployment.extensions/onap-vnfsdk-vnfsdk scaled
deployment.extensions/onap-vnfsdk-vnfsdk-pgpool scaled
deployment.extensions/onap-vvp-vvp scaled
deployment.extensions/onap-vvp-vvp-ci-uwsgi scaled
deployment.extensions/onap-vvp-vvp-cms-uwsgi scaled
deployment.extensions/onap-vvp-vvp-em-uwsgi scaled
deployment.extensions/onap-vvp-vvp-ext-haproxy scaled
deployment.extensions/onap-vvp-vvp-gitlab scaled
deployment.extensions/onap-vvp-vvp-imagescanner scaled
deployment.extensions/onap-vvp-vvp-int-haproxy scaled
deployment.extensions/onap-vvp-vvp-jenkins scaled
deployment.extensions/onap-vvp-vvp-postgres scaled
deployment.extensions/onap-vvp-vvp-redis scaled
ubuntu@a-ld0:~$ kubectl scale --replicas=0 statefulsets --all -n onap
statefulset.apps/onap-aaf-aaf-sms-quorumclient scaled
statefulset.apps/onap-aaf-aaf-sms-vault scaled
statefulset.apps/onap-aai-aai-cassandra scaled
statefulset.apps/onap-appc-appc scaled
statefulset.apps/onap-appc-appc-db scaled
statefulset.apps/onap-consul-consul-server scaled
statefulset.apps/onap-dcaegen2-dcae-db scaled
statefulset.apps/onap-dcaegen2-dcae-redis scaled
statefulset.apps/onap-dmaap-dbc-pg scaled
statefulset.apps/onap-dmaap-message-router scaled
statefulset.apps/onap-dmaap-message-router-kafka scaled
statefulset.apps/onap-dmaap-message-router-zookeeper scaled
statefulset.apps/onap-oof-cmso-db scaled
statefulset.apps/onap-oof-music-cassandra scaled
statefulset.apps/onap-oof-zookeeper scaled
statefulset.apps/onap-policy-drools scaled
statefulset.apps/onap-policy-pdp scaled
statefulset.apps/onap-policy-policy-apex-pdp scaled
statefulset.apps/onap-sdnc-controller-blueprints-db scaled
statefulset.apps/onap-sdnc-nengdb scaled
statefulset.apps/onap-sdnc-sdnc scaled
statefulset.apps/onap-sdnc-sdnc-db scaled
statefulset.apps/onap-vid-vid-mariadb-galera scaled
statefulset.apps/onap-vnfsdk-vnfsdk-postgres scaled
# bring the deployment back up
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system heapster-7b48b696fc-99cd6 0/1 Error 0 2d
kube-system kube-dns-6655f78c68-k4dh4 0/3 Error 0 2d
kube-system kubernetes-dashboard-6f54f7c4b-fhqmf 0/1 Error 0 2d
kube-system monitoring-grafana-7877679464-cscg4 0/1 Completed 0 2d
kube-system monitoring-influxdb-64664c6cf5-wmw8w 0/1 Completed 0 2d
kube-system tiller-deploy-78db58d887-9qlwh 1/1 Running 0 2d
onap onap-aaf-aaf-sms-preload-k7mx6 0/1 Completed 0 2d
onap onap-aaf-aaf-sshsm-distcenter-lk5st 0/1 Completed 0 2d
....
# note: not all workloads originally ran a single replica - some ran 2, 3 or 7
kubectl scale --replicas=1 deployments --all -n onap
kubectl scale --replicas=1 statefulsets --all -n onap
# 6m
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | grep Terminating | wc -l
179
|
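To repeat this pause/resume cycle as one operation, a minimal helper sketch (resuming to a single replica is a simplification - as noted above, some workloads originally ran 2, 3 or 7 replicas, so capture the original counts first if you need them restored):
Code Block |
---|
|
#!/bin/bash
# scale-onap.sh - scale every onap deployment and statefulset
# usage: ./scale-onap.sh 0   (pause)   or   ./scale-onap.sh 1   (resume)
REPLICAS=${1:-0}
for kind in deployments statefulsets; do
  kubectl scale --replicas=$REPLICAS $kind --all -n onap
done
# when pausing, watch the Terminating pods drain
kubectl get pods -n onap | grep Terminating | wc -l |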
Remove a Deployment
see also
Jira Legacy |
---|
server | System Jira |
---|
serverId | 4733707d-2057-3a0f-ae5e-4fd8aff50176 |
---|
key | OOM-1463 |
---|
|
https://git.onap.org/logging-analytics/tree/deploy/cd.sh#n57
This cleanup is required for a couple of pods that leave leftover resources behind, and for the secondary Cloudify out-of-band orchestration in DCAEGEN2:
Jira Legacy |
---|
server | System Jira |
---|
serverId | 4733707d-2057-3a0f-ae5e-4fd8aff50176 |
---|
key | OOM-1089 |
---|
|
Jira Legacy |
---|
server | System Jira |
---|
serverId | 4733707d-2057-3a0f-ae5e-4fd8aff50176 |
---|
key | DCAEGEN2-1067 |
---|
|
Jira Legacy |
---|
server | System Jira |
---|
serverId | 4733707d-2057-3a0f-ae5e-4fd8aff50176 |
---|
key | DCAEGEN2-1068 |
---|
|
Code Block |
---|
|
kubectl delete namespace onap
sudo helm delete --purge onap
kubectl delete pv --all
kubectl delete pvc --all
kubectl delete secrets --all
kubectl delete clusterrolebinding --all
# and remove the persisted volume data
sudo rm -rf /dockerdata-nfs/onap-<pod>
# after redeploying, count the containers that are not yet ready
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2|1/3|2/3' | wc -l
199
# 20m
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2|1/3|2/3' | wc -l
180
# 60 min
ubuntu@a-ld0:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2|1/3|2/3' | wc -l
42 |
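The individual steps above can be collected into a single teardown pass. A minimal sketch modeled on the cd.sh script linked earlier - the namespace-scoped flags, the wait loop and its interval are assumptions, and the final wipe of /dockerdata-nfs is destructive:
Code Block |
---|
|
#!/bin/bash
# teardown-onap.sh - remove the onap release and its leftover resources
kubectl delete namespace onap
sudo helm delete --purge onap
kubectl delete pv --all
kubectl delete pvc --all -n onap
kubectl delete secrets --all -n onap
kubectl delete clusterrolebinding --all
# block until the namespace is fully terminated before any redeploy
while kubectl get namespace onap > /dev/null 2>&1; do
  echo "waiting for onap namespace to terminate"
  sleep 10
done
# destructive: clear the persisted volume data
sudo rm -rf /dockerdata-nfs/onap-* |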
Rotate Logs
Find the largest log directories first:
du --max-depth=1 | sort -nr
...
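A minimal manual-rotation sketch, assuming the logs accumulate under the /dockerdata-nfs shared mount (the path, the *.log pattern and the 1G threshold are all assumptions - adjust to what du reports):
Code Block |
---|
|
# locate the largest directories under the shared mount
du --max-depth=1 /dockerdata-nfs 2>/dev/null | sort -nr | head -10
# truncate oversized logs in place rather than deleting them,
# so writing processes keep a valid file handle
sudo find /dockerdata-nfs -name '*.log' -size +1G -exec truncate -s 0 {} \; |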
Make sure the robot container is deployed (via --set robot.enabled=true). You can also run the scripts directly from the kubernetes folder, outside of the container - see https://git.onap.org/logging-analytics/tree/deploy/cd.sh#n297
Code Block |
---|
|
# make sure the robot container is up via --set robot.enabled=true
cd oom/kubernetes/robot
./ete-k8s.sh $ENVIRON health |
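A common follow-up is to poll the health suite until the deployment settles - a sketch assuming the oom checkout above and onap as the helm release name passed as $ENVIRON (the retry count and interval are assumptions):
Code Block |
---|
|
cd oom/kubernetes/robot
# retry health until it passes or we give up
for i in $(seq 1 10); do
  ./ete-k8s.sh onap health && break
  echo "health attempt $i failed - retrying in 2 minutes"
  sleep 120
done |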
...