...
Committing Code
Code Block |
---|
|
# clone
git clone ssh://michaelobrien@gerrit.onap.org:29418/logging-analytics
# modify files
# stage your changes
git add .
# commit them locally
git commit -m "your commit message"
# amend your commit to add a Signed-off-by line (opens an editor)
git commit -s --amend
# in the editor, add an Issue-ID: line after the Change-Id: line
# submit your commit to ONAP Gerrit for review
git review
# track your review at https://gerrit.onap.org/r/#/dashboard/self |
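If git review reports a missing remote or Change-Id, set up git-review and the Gerrit commit-msg hook first. A minimal sketch, assuming pip is available and your Gerrit account is configured:
Code Block |
---|
|
# install git-review (one time)
sudo pip install git-review
# from the repo, add the gerrit remote and install the Change-Id commit-msg hook
cd logging-analytics
git review -s |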
...
https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
Use a different kubectl context
Code Block |
---|
|
kubectl --kubeconfig ~/.kube/config2 get pods --all-namespaces -o json
|
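To switch between contexts defined inside a single kubeconfig instead of juggling files, a minimal sketch using the standard kubectl config subcommands (context names are placeholders):
Code Block |
---|
|
# list the contexts available in the current kubeconfig
kubectl config get-contexts
# make one the default for subsequent commands
kubectl config use-context <context-name>
# or target a context for a single command
kubectl --context <context-name> get pods --all-namespaces |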
Adding user kubectl accounts
Normally you don't use the admin account directly when working with a particular namespace. The steps below create a namespace, a service account with the appropriate role bindings, and a token for kubectl access.
Code Block |
---|
|
# TODO: create a script out of this
# create a namespace
# https://kubernetes.io/docs/tasks/administer-cluster/namespaces-walkthrough/#create-new-namespaces
vi mobrien_namespace.yaml
{
  "kind": "Namespace",
  "apiVersion": "v1",
  "metadata": {
    "name": "mobrien",
    "labels": {
      "name": "mobrien"
    }
  }
}
kubectl create -f mobrien_namespace.yaml
# or
kubectl --kubeconfig ~/.kube/admin create ns mobrien
namespace "mobrien" created
# service account
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create sa mobrien
serviceaccount "mobrien" created
# rolebinding mobrien
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-mobrien-privilegedpsp --clusterrole=privilegedpsp --serviceaccount=mobrien:mobrien
rolebinding "mobrien-mobrien-privilegedpsp" created
# rolebinding default
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-default-privilegedpsp --clusterrole=privilegedpsp --serviceaccount=mobrien:default
rolebinding "mobrien-default-privilegedpsp" created
# rolebinding admin
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-mobrien-admin --clusterrole=admin --serviceaccount=mobrien:mobrien
rolebinding "mobrien-mobrien-admin" created
# rolebinding persistent-volume-role
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-mobrien-persistent-volume-role --clusterrole=persistent-volume-role --serviceaccount=mobrien:mobrien
clusterrolebinding "mobrien-mobrien-persistent-volume-role" created
# rolebinding default-persistent-volume-role
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-default-persistent-volume-role --clusterrole=persistent-volume-role --serviceaccount=mobrien:default
clusterrolebinding "mobrien-default-persistent-volume-role" created
# rolebinding helm-pod-list
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-mobrien-helm-pod-list --clusterrole=helm-pod-list --serviceaccount=mobrien:mobrien
clusterrolebinding "mobrien-mobrien-helm-pod-list" created
# rolebinding default-helm-pod-list
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-default-helm-pod-list --clusterrole=helm-pod-list --serviceaccount=mobrien:default
clusterrolebinding "mobrien-default-helm-pod-list" created
# get the serviceAccount and extract the token to place into a config yaml
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien get sa
NAME SECRETS AGE
default 1 20m
mobrien 1 18m
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe serviceaccount mobrien
Name: mobrien
Namespace: mobrien
Labels: <none>
Annotations: <none>
Image pull secrets: <none>
Mountable secrets: mobrien-token-v9z5j
Tokens: mobrien-token-v9z5j
TOKEN=$(kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe secrets "$(kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe serviceaccount mobrien | grep -i Tokens | awk '{print $2}')" | grep token: | awk '{print $2}')
echo $TOKEN
eyJO....b3VudC
# put this in your ~/.kube/config and edit the namespace
|
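A minimal kubeconfig sketch for the new account - the server URL, certificate-authority-data, and context names below are placeholders to copy from your existing admin config:
Code Block |
---|
|
apiVersion: v1
kind: Config
clusters:
- name: onap-cluster
  cluster:
    server: https://<cluster-endpoint>        # copy from ~/.kube/admin
    certificate-authority-data: <ca-data>     # copy from ~/.kube/admin
users:
- name: mobrien
  user:
    token: <TOKEN echoed above>
contexts:
- name: mobrien
  context:
    cluster: onap-cluster
    user: mobrien
    namespace: mobrien
current-context: mobrien |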
See also https://stackoverflow.com/questions/44948483/create-user-in-kubernetes-for-kubectl
Helm on Rancher unauthorized
If helm commands fail with an authorization error, cycle the Rancher GitHub RBAC authentication off and on and retry.
Code Block |
---|
|
ubuntu@a-ons1-master:~$ watch kubectl get pods --all-namespaces
ubuntu@a-ons1-master:~$ sudo helm list
Error: Unauthorized
ubuntu@a-ons1-master:~$ sudo helm list
NAME REVISION UPDATED STATUS CHART NAMESPACE
onap 4 Thu Mar 7 13:03:29 2019 DEPLOYED onap-3.0.0 onap
onap-dmaap 1 Thu Mar 7 13:03:32 2019 DEPLOYED dmaap-3.0.0 onap |
Working with JSONPath
https://kubernetes.io/docs/reference/kubectl/jsonpath/
Fortunately, most of what we need to query from the state of our Kubernetes deployment can be scripted using JSONPath; jq can then be used for any additional processing of the returned values.
Get the full json output to design JSONPath queries
Jira Legacy |
---|
server | System Jira |
---|
serverId | 4733707d-2057-3a0f-ae5e-4fd8aff50176 |
---|
key | LOG-914 |
---|
|
Code Block |
---|
|
kubectl get pods --all-namespaces -o json
# we want to shut down a rogue pod that is not responding to normal deletion commands - but its name contains a generated suffix
onap onap-portal-portal-sdk-7c49c97955-smbws 0/2 Terminating 0 2d
ubuntu@onap-oom-obrien-rancher-e0:~$ kubectl get pods --field-selector=status.phase!=Running --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
onap onap-portal-portal-sdk-7c49c97955-smbws 0/2 Terminating 0 2d
#"spec": {"containers": [{},"name": "portal-sdk",
kubectl get pods --namespace onap -o jsonpath="{.items[*].spec.containers[0].name}"
portal-sdk
# so combining the two queries
kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}"
onap-portal-portal-sdk-7c49c97955-smbws
# and wrapping it with a delete command
export POD_NAME=$(kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}")
echo "$POD_NAME"
kubectl delete pods $POD_NAME --grace-period=0 --force -n onap
ubuntu@onap-oom-obrien-rancher-e0:~$ sudo ./term.sh
onap-portal-portal-sdk-7c49c97955-smbws
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "onap-portal-portal-sdk-7c49c97955-smbws" force deleted
|
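The term.sh referenced above is essentially the last three commands wrapped in a script - a minimal sketch:
Code Block |
---|
|
#!/bin/bash
# term.sh - force-delete any onap pod that is not in the Running phase
export POD_NAME=$(kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}")
echo "$POD_NAME"
kubectl delete pods $POD_NAME --grace-period=0 --force -n onap |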
Installing a pod
Code Block |
---|
language | bash |
---|
theme | Midnight |
---|
|
# automatically via cd.sh in LOG-326
# get dev.yaml, set any pods you want brought up to true, and fill in the openstack parameters
sudo wget https://git.onap.org/oom/plain/kubernetes/onap/resources/environments/dev.yaml
sudo cp logging-analytics/deploy/cd.sh .
# or
# manually
cd oom/kubernetes/
sudo make clean
sudo make all
sudo make onap
sudo helm install local/onap -n onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set log.enabled=true
# adding another (so)
sudo helm upgrade local/onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set so.enabled=true --set log.enabled=true |
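After an install or upgrade, verify the new pods come up before layering on more charts:
Code Block |
---|
|
# watch pods transition to Running/Completed
watch kubectl get pods --all-namespaces
# or filter for the chart just enabled
kubectl get pods -n onap | grep onap-log |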
...
Code Block |
---|
language | bash |
---|
theme | Midnight |
---|
|
# override global docker pull policy for a single component
# set in oom/kubernetes/onap/values.yaml
# use global.pullPolicy in your -f yaml or a --set
|
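For example, a sketch setting the global policy via --set (per-component overrides follow the same pattern with the chart's own pullPolicy key, which varies by chart):
Code Block |
---|
|
# IfNotPresent avoids re-pulling images already cached on the node
sudo helm upgrade -i onap local/onap --namespace onap --set global.pullPolicy=IfNotPresent |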
Exec into a container of a pod with multiple containers
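When a pod runs more than one container, kubectl exec needs the -c flag to select one. A minimal sketch - the portal-app container name here is illustrative:
Code Block |
---|
|
# list the container names in the pod
kubectl get pod onap-portal-app-8486dc7ff8-nbps7 -n onap -o jsonpath="{.spec.containers[*].name}"
# exec into a specific container of the pod
kubectl exec -it -n onap onap-portal-app-8486dc7ff8-nbps7 -c portal-app bash |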
...
Code Block |
---|
language | bash |
---|
theme | Midnight |
---|
|
cd oom/kubernetes
# do a make if anything is modified in your charts
sudo make all
#sudo make onap
ubuntu@ip-172-31-19-23:~/oom/kubernetes$ sudo helm upgrade -i onap local/onap --namespace onap --set log.enabled=false
# wait and check in another terminal for all containers to terminate
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
onap onap-log-elasticsearch-7557486bc4-5mng9 0/1 CrashLoopBackOff 9 29m
onap onap-log-kibana-fc88b6b79-nt7sd 1/1 Running 0 35m
onap onap-log-logstash-c5z4d 1/1 Terminating 0 4h
onap onap-log-logstash-ftxfz 1/1 Terminating 0 4h
onap onap-log-logstash-gl59m 1/1 Terminating 0 4h
onap onap-log-logstash-nxsf8 1/1 Terminating 0 4h
onap onap-log-logstash-w8q8m 1/1 Terminating 0 4h
sudo helm upgrade -i onap local/onap --namespace onap --set portal.enabled=false
sudo vi portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml
sudo make portal
sudo make onap
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
sudo helm upgrade -i onap local/onap --namespace onap --set log.enabled=true
sudo helm upgrade -i onap local/onap --namespace onap --set portal.enabled=true
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
onap onap-log-elasticsearch-7557486bc4-2jd65 0/1 Init:0/1 0 31s
onap onap-log-kibana-fc88b6b79-5xqg4 0/1 Init:0/1 0 31s
onap onap-log-logstash-5vq82 0/1 Init:0/1 0 31s
onap onap-log-logstash-gvr9z 0/1 Init:0/1 0 31s
onap onap-log-logstash-qqzq5 0/1 Init:0/1 0 31s
onap onap-log-logstash-vbp2x 0/1 Init:0/1 0 31s
onap onap-log-logstash-wr9rd 0/1 Init:0/1 0 31s
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-portal
onap onap-portal-app-8486dc7ff8-nbps7 0/2 Init:0/1 0 9m
onap onap-portal-cassandra-8588fbd698-4wthv 1/1 Running 0 9m
onap onap-portal-db-7d6b95cd94-9x4kf 0/1 Running 0 9m
onap onap-portal-db-config-dpqkq 0/2 Init:0/1 0 9m
onap onap-portal-sdk-77cd558c98-5255r 0/2 Init:0/1 0 9m
onap onap-portal-widget-6469f4bc56-g8s62 0/1 Init:0/1 0 9m
onap onap-portal-zookeeper-5d8c598c4c-czpnz 1/1 Running 0 9m |
Kubernetes inter pod communication - using DNS service addresses
Use the service name (with or without the namespace), not the service IP address, for pod-to-pod communication - NodePorts or ingress are only required for traffic originating outside the cluster.
For example log-ls:5044 (same namespace) or log-ls.onap:5044 (namespace-qualified) - fully qualified variants are shown after the example below.
Code Block |
---|
|
# example curl call between AAI and SDC
amdocs@obriensystemsu0:~$ kubectl exec -it -n onap onap-aai-aai-graphadmin-7bd5fc9bd-l4v4z bash
Defaulting container name to aai-graphadmin.
root@aai-graphadmin:/opt/app/aai-graphadmin# |
...
curl http://sdc-fe:8181
<HTML><HEAD><TITLE>Error 404 - Not Found</TITLE><BODY><H2>Error 404 - Not Found.</H2>
</ul><hr><a href="http://eclipse.org/jetty"><img border=0 src="/favicon.ico"/></a> <a href="http://eclipse.org/jetty">Powered by Jetty:// 9.4.12.v20180830</a><hr/> |
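The same call also works with the namespace-qualified and fully qualified service names (standard Kubernetes DNS forms):
Code Block |
---|
|
curl http://sdc-fe.onap:8181
curl http://sdc-fe.onap.svc.cluster.local:8181 |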
Removing docker if required
Code Block |
---|
|
sudo apt-get autoremove -y docker-engine |
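If docker then needs to be reinstalled, one common option on Ubuntu (an assumption - ONAP deployments may pin a specific docker version):
Code Block |
---|
|
sudo apt-get update
sudo apt-get install -y docker.io |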
...