...
Committing Code
Code Block:
# clone
git clone ssh://michaelobrien@gerrit.onap.org:29418/logging-analytics
# modify files
# stage your changes
git add .
git commit -m "your commit message"
# amend your commit to add a sign-off
git commit -s --amend
# add Issue-ID after Change-ID
# Submit your commit to ONAP Gerrit for review
git review
# goto https://gerrit.onap.org/r/#/dashboard/self
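For reference, a sketch of the resulting commit message (the Change-Id value below is a placeholder - the real one is generated by the Gerrit commit hook; the Issue-ID references your JIRA ticket, following the "2nd line" convention used later on this page):
Code Block:
update rancher version to 1.6.18
Issue-ID: LOG-326
Signed-off-by: Michael OBrien <frank.obrien@amdocs.com>
Change-Id: I0123456789abcdef0123456789abcdef01234567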
...
Workstation configuration
Ubuntu 16.04 on VMware Workstation 15 or Fusion 8 or AWS/Azure VM
Note: do not use the GUI upgrade (it can cause the VM to periodically lock); run individual apt-get commands instead.
Code Block (bash):
# start with a clean VM; I use root, but you can use the recommended non-root account
sudo vi /etc/hosts
# add your hostname to the ::1 and 127.0.0.1 lines, or each sudo command will hang for up to 10 sec on DNS resolution (especially on ubuntu 18.04)
sudo apt-get update
sudo apt-get install openjdk-8-jdk
# skip this on a headless VM
sudo apt-get install ubuntu-desktop
#sudo apt-get install git
sudo apt-get install maven
#or
sudo wget http://apache.mirror.gtcomm.net/maven/maven-3/3.5.4/binaries/apache-maven-3.5.4-bin.tar.gz
sudo cp apache-maven-3.5.4-bin.tar.gz /opt
cd /opt
sudo tar -xvf apache-maven-3.5.4-bin.tar.gz
sudo vi /etc/environment
# add the line below (note the m suffix - a bare -Xms8192 would request only 8192 bytes)
MAVEN_OPTS="-Xms8192m -Djava.net.preferIPv4Stack=true"
# restart the terminal
ubuntu@ip-172-31-78-76:~$ mvn -version
Apache Maven 3.5.4 (1edded0938998edf8bf061f1ceb3cfdeccf443fe; 2018-06-17T18:33:14Z)
Maven home: /opt/apache-maven-3.5.4
Java version: 1.8.0_171, vendor: Oracle Corporation, runtime: /usr/lib/jvm/java-8-openjdk-amd64/jre
sudo vi ~/.ssh/config
Host *
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
# a couple options on copying the ssh key
# from another machine
root@ubuntu:~/_dev# cat ~/.ssh/id_rsa | ssh -i ~/.ssh/onap_rsa ubuntu@ons.onap.info 'cat >> .ssh/onap_rsa && echo "Key copied"'
Key copied
sudo chown ubuntu:ubuntu ~/.ssh/onap_rsa
# or
# scp onap gerrit cert into VM from host macbook
obrien:obrienlabs amdocs$ scp ~/.ssh/onap_rsa amdocs@192.168.211.129:~/
# move to root
sudo su -
root@obriensystemsu0:~# cp /home/amdocs/onap_rsa .
ls /home/amdocs/.m2
cp onap_rsa ~/.ssh/id_rsa
chmod 400 ~/.ssh/id_rsa
# move from root to ubuntu - if using a non-root user
sudo chown ubuntu:ubuntu ~/.ssh/onap_rsa
# test your gerrit access
sudo git config --global --add gitreview.username michaelobrien
sudo git config --global user.email frank.obrien@amdocs.com
sudo git config --global user.name "Michael OBrien"
sudo git config --global gitreview.remote origin
sudo mkdir log-326-rancher-ver
cd log-326-rancher-ver/
sudo git clone ssh://michaelobrien@gerrit.onap.org:29418/logging-analytics
cd logging-analytics/
sudo vi deploy/rancher/oom_rancher_setup.sh
sudo git add deploy/rancher/oom_rancher_setup.sh
# setup git-review
sudo apt-get install git-review
sudo git config --global gitreview.remote origin
# upload a patch
sudo git commit -am "update rancher version to 1.6.18"
# 2nd line should be "Issue-ID: LOG-326"
sudo git commit -s --amend
sudo git review
Your change was committed before the commit hook was installed.
Amending the commit to add a gerrit change id.
remote: Processing changes: new: 1, refs: 1, done
remote: New Changes:
remote: https://gerrit.onap.org/r/55299 update rancher version to 1.6.18
remote:
To ssh://michaelobrien@gerrit.onap.org:29418/logging-analytics
* [new branch] HEAD -> refs/publish/master
# see
https://gerrit.onap.org/r/#/c/55299/
# if you get a corrupted filesystem, run "fsck -y /dev/sda1"
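To verify your Gerrit SSH access before pushing, a quick check (you should get a Gerrit welcome banner rather than a shell):
Code Block (bash):
ssh -p 29418 michaelobrien@gerrit.onap.org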
...
https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
Use a different kubectl context
Code Block:
kubectl --kubeconfig ~/.kube/config2 get pods --all-namespaces
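Rather than passing --kubeconfig on every call, you can also switch contexts within one kubeconfig, or point the session at an alternate file (standard kubectl commands; context names depend on your own config):
Code Block (bash):
# list the contexts kubectl knows about
kubectl config get-contexts
# switch the active context
kubectl config use-context <context-name>
# or point this shell session at an alternate kubeconfig
export KUBECONFIG=~/.kube/config2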
Adding user kubectl accounts
Normally you don't use the admin account directly when working in a particular namespace. Below are details on how to create a service account token and the appropriate role bindings.
Code Block:
# TODO: create a script out of this
# create a namespace
# https://kubernetes.io/docs/tasks/administer-cluster/namespaces-walkthrough/#create-new-namespaces
vi mobrien_namespace.yaml
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "mobrien",
"labels": {
"name": "mobrien"
}
}
}
kubectl create -f mobrien_namespace.yaml
# or
kubectl --kubeconfig ~/.kube/admin create ns mobrien
namespace "mobrien" created
# service account
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create sa mobrien
serviceaccount "mobrien" created
# rolebinding mobrien
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-mobrien-privilegedpsp --clusterrole=privilegedpsp --serviceaccount=mobrien:mobrien
rolebinding "mobrien-mobrien-privilegedpsp" created
# rolebinding default
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-default-privilegedpsp --clusterrole=privilegedpsp --serviceaccount=mobrien:default
rolebinding "mobrien-default-privilegedpsp" created
# rolebinding admin
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create rolebinding mobrien-mobrien-admin --clusterrole=admin --serviceaccount=mobrien:mobrien
rolebinding "mobrien-mobrien-admin" created
# rolebinding persistent-volume-role
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-mobrien-persistent-volume-role --clusterrole=persistent-volume-role --serviceaccount=mobrien:mobrien
clusterrolebinding "mobrien-mobrien-persistent-volume-role" created
# rolebinding default-persistent-volume-role
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-default-persistent-volume-role --clusterrole=persistent-volume-role --serviceaccount=mobrien:default
clusterrolebinding "mobrien-default-persistent-volume-role" created
# rolebinding helm-pod-list
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-mobrien-helm-pod-list --clusterrole=helm-pod-list --serviceaccount=mobrien:mobrien
clusterrolebinding "mobrien-mobrien-helm-pod-list" created
# rolebinding default-helm-pod-list
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien create clusterrolebinding mobrien-default-helm-pod-list --clusterrole=helm-pod-list --serviceaccount=mobrien:default
clusterrolebinding "mobrien-default-helm-pod-list" created
# get the serviceAccount and extract the token to place into a config yaml
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien get sa
NAME SECRETS AGE
default 1 20m
mobrien 1 18m
kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe serviceaccount mobrien
Name: mobrien
Namespace: mobrien
Labels: <none>
Annotations: <none>
Image pull secrets: <none>
Mountable secrets: mobrien-token-v9z5j
Tokens: mobrien-token-v9z5j
TOKEN=$(kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe secrets "$(kubectl --kubeconfig ~/.kube/admin --namespace=mobrien describe serviceaccount mobrien | grep -i Tokens | awk '{print $2}')" | grep token: | awk '{print $2}')
echo $TOKEN
eyJO....b3VudC
# put this in your ~/.kube/config and edit the namespace
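As an alternative to hand-editing ~/.kube/config, the token can be wired in with kubectl config (a sketch; assumes the cluster entry in your kubeconfig is named "onap" and $TOKEN is set as above):
Code Block (bash):
# add a user entry backed by the service account token
kubectl config set-credentials mobrien --token="$TOKEN"
# bind the user, cluster, and namespace into a context and switch to it
kubectl config set-context mobrien --cluster=onap --namespace=mobrien --user=mobrien
kubectl config use-context mobrien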
see also https://stackoverflow.com/questions/44948483/create-user-in-kubernetes-for-kubectl
Helm on Rancher unauthorized
If you get an authorization error running helm commands, cycle the Rancher RBAC (the GitHub auth integration) off and on.
Code Block:
ubuntu@a-ons1-master:~$ watch kubectl get pods --all-namespaces
ubuntu@a-ons1-master:~$ sudo helm list
Error: Unauthorized
ubuntu@a-ons1-master:~$ sudo helm list
NAME REVISION UPDATED STATUS CHART NAMESPACE
onap 4 Thu Mar 7 13:03:29 2019 DEPLOYED onap-3.0.0 onap
onap-dmaap 1 Thu Mar 7 13:03:32 2019 DEPLOYED dmaap-3.0.0 onap
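If cycling RBAC does not clear the error, a quick sanity check is to confirm the server side is up (assuming Helm 2 with Tiller, per this ONAP release):
Code Block (bash):
sudo helm version
kubectl -n kube-system get pods | grep tiller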
Working with JSONPath
https://kubernetes.io/docs/reference/kubectl/jsonpath/
...
Code Block:
kubectl get pods --all-namespaces -o json
# we are looking to shut down a rogue pod that is not responding to the normal deletion commands - but it has a generated name
onap onap-portal-portal-sdk-7c49c97955-smbws 0/2 Terminating 0 2d
ubuntu@onap-oom-obrien-rancher-e0:~$ kubectl get pods --field-selector=status.phase!=Running --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
onap onap-portal-portal-sdk-7c49c97955-smbws 0/2 Terminating 0 2d
# in the JSON: "spec": {"containers": [{"name": "portal-sdk", ...}]}
kubectl get pods --namespace onap -o jsonpath="{.items[*].spec.containers[0].name}"
portal-sdk
# so combining the two queries
kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}"
onap-portal-portal-sdk-7c49c97955-smbws
# and wrapping it with a delete command
export POD_NAME=$(kubectl get pods --field-selector=status.phase!=Running --all-namespaces -o jsonpath="{.items[*].metadata.name}")
echo "$POD_NAME"
kubectl delete pods $POD_NAME --grace-period=0 --force -n onap
ubuntu@onap-oom-obrien-rancher-e0:~$ sudo ./term.sh
onap-portal-portal-sdk-7c49c97955-smbws
warning: Immediate deletion does not wait for confirmation that the running resource has been terminated. The resource may continue to run on the cluster indefinitely.
pod "onap-portal-portal-sdk-7c49c97955-smbws" force deleted
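JSONPath can also emit several fields per item using a range expression - a minimal sketch that prints each pod name with its phase:
Code Block (bash):
kubectl get pods -n onap -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.status.phase}{"\n"}{end}'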
Installing a pod
Code Block (bash):
# automatically via cd.sh in LOG-326
# get the dev.yaml and set any pods you want up to true as well as fill out the openstack parameters
sudo wget https://git.onap.org/oom/plain/kubernetes/onap/resources/environments/dev.yaml
sudo cp logging-analytics/deploy/cd.sh .
# or
# manually
cd oom/kubernetes/
sudo make clean
sudo make all
sudo make onap
sudo helm install local/onap -n onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set log.enabled=true
# adding another (so)
sudo helm upgrade local/onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set so.enabled=true --set log.enabled=true
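After the install or upgrade, watch the pods come up and confirm the release state (the same commands used elsewhere on this page):
Code Block (bash):
watch kubectl get pods --all-namespaces
sudo helm list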
...
Code Block (bash):
# override global docker pull policy for a single component
# set in oom/kubernetes/onap/values.yaml
# use global.pullPolicy in your -f yaml or a --set
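# for example (a sketch - IfNotPresent and Always are the standard Kubernetes pull policies):
sudo helm upgrade -i onap local/onap --namespace onap --set global.pullPolicy=IfNotPresent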
Exec into a container of a pod with multiple containers
...
Code Block (bash):
cd oom/kubernetes
# do a make if anything is modified in your charts
sudo make all
#sudo make onap
ubuntu@ip-172-31-19-23:~/oom/kubernetes$ sudo helm upgrade -i onap local/onap --namespace onap --set log.enabled=false
# wait and check in another terminal for all containers to terminate
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
onap onap-log-elasticsearch-7557486bc4-5mng9 0/1 CrashLoopBackOff 9 29m
onap onap-log-kibana-fc88b6b79-nt7sd 1/1 Running 0 35m
onap onap-log-logstash-c5z4d 1/1 Terminating 0 4h
onap onap-log-logstash-ftxfz 1/1 Terminating 0 4h
onap onap-log-logstash-gl59m 1/1 Terminating 0 4h
onap onap-log-logstash-nxsf8 1/1 Terminating 0 4h
onap onap-log-logstash-w8q8m 1/1 Terminating 0 4h
sudo helm upgrade -i onap local/onap --namespace onap --set portal.enabled=false
sudo vi portal/charts/portal-sdk/resources/config/deliveries/properties/ONAPPORTALSDK/logback.xml
sudo make portal
sudo make onap
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
sudo helm upgrade -i onap local/onap --namespace onap --set log.enabled=true
sudo helm upgrade -i onap local/onap --namespace onap --set portal.enabled=true
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-log
onap onap-log-elasticsearch-7557486bc4-2jd65 0/1 Init:0/1 0 31s
onap onap-log-kibana-fc88b6b79-5xqg4 0/1 Init:0/1 0 31s
onap onap-log-logstash-5vq82 0/1 Init:0/1 0 31s
onap onap-log-logstash-gvr9z 0/1 Init:0/1 0 31s
onap onap-log-logstash-qqzq5 0/1 Init:0/1 0 31s
onap onap-log-logstash-vbp2x 0/1 Init:0/1 0 31s
onap onap-log-logstash-wr9rd 0/1 Init:0/1 0 31s
ubuntu@ip-172-31-19-23:~$ kubectl get pods --all-namespaces | grep onap-portal
onap onap-portal-app-8486dc7ff8-nbps7 0/2 Init:0/1 0 9m
onap onap-portal-cassandra-8588fbd698-4wthv 1/1 Running 0 9m
onap onap-portal-db-7d6b95cd94-9x4kf 0/1 Running 0 9m
onap onap-portal-db-config-dpqkq 0/2 Init:0/1 0 9m
onap onap-portal-sdk-77cd558c98-5255r 0/2 Init:0/1 0 9m
onap onap-portal-widget-6469f4bc56-g8s62 0/1 Init:0/1 0 9m
onap onap-portal-zookeeper-5d8c598c4c-czpnz 1/1 Running 0 9m
Kubernetes inter pod communication - using DNS service addresses
Use the service name (with or without the namespace), not the service IP address, for inter-namespace communication (NodePorts or an ingress are only required for traffic from outside the cluster).
For example log-ls:5044 or log-ls.onap:5044
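The short, namespaced, and fully qualified forms are equivalent inside the cluster (standard Kubernetes service DNS; the log-ls service in the onap namespace is used for illustration):
Code Block (bash):
curl http://log-ls:5044                          # same namespace
curl http://log-ls.onap:5044                     # cross-namespace
curl http://log-ls.onap.svc.cluster.local:5044   # fully qualified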
Code Block:
# example curl call between AAI and SDC
amdocs@obriensystemsu0:~$ kubectl exec -it -n onap onap-aai-aai-graphadmin-7bd5fc9bd-l4v4z bash
Defaulting container name to aai-graphadmin.
root@aai-graphadmin:/opt/app/aai-graphadmin#
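When a pod has more than one container, kubectl picks the first by default (hence the "Defaulting container name" message above); the -c flag selects one explicitly (a sketch reusing the pod from the example):
Code Block (bash):
kubectl exec -it -n onap onap-aai-aai-graphadmin-7bd5fc9bd-l4v4z -c aai-graphadmin -- bash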
...
curl http://sdc-fe:8181
<HTML><HEAD><TITLE>Error 404 - Not Found</TITLE><BODY><H2>Error 404 - Not Found.</H2>
</ul><hr><a href="http://eclipse.org/jetty"><img border=0 src="/favicon.ico"/></a> <a href="http://eclipse.org/jetty">Powered by Jetty:// 9.4.12.v20180830</a><hr/>
docker if required
Code Block:
sudo apt-get autoremove -y docker-engine
...
Make sure the robot container is deployed - you may run directly from the kubernetes folder outside of the container - see https://git.onap.org/logging-analytics/tree/deploy/cd.sh#n297
Code Block:
# make sure the robot container is up via --set robot.enabled=true
cd oom/kubernetes/robot
./ete-k8s.sh $ENVIRON health
...