...
Deploying demo pod
Helm Deployment
Also, as of 20180918, use Mike Elliott's Helm plugin - https://gerrit.onap.org/r/#/c/67071/
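That review adds deploy/undeploy plugins to Helm that wrap the per-chart make/install steps shown below. A minimal usage sketch, assuming the plugins end up under kubernetes/helm/plugins in the oom clone and that Helm home is ~/.helm (check the review itself for the exact install path and flags):

```
# copy the OOM helm plugins into the local helm home so 'helm deploy' is available
sudo cp -R ~/oom/kubernetes/helm/plugins/ ~/.helm
# deploy the onap parent chart via the plugin instead of plain 'helm install'
sudo helm deploy onap local/onap --namespace onap \
  -f onap/resources/environments/disable-allcharts.yaml --set log.enabled=true
```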
# prereq - Makefile in k8s root - copied/modified parent chart from https://git.onap.org/oom/tree/kubernetes/Makefile - oom and logging-analytics cloned
sudo git clone https://gerrit.onap.org/r/oom
sudo git clone https://gerrit.onap.org/r/logging-analytics
cd logging-analytics
# pull patch in progress
sudo git pull https://gerrit.onap.org/r/logging-analytics refs/changes/71/57171/9

# install onap log
/oom/kubernetes$ sudo helm delete --purge onap
/oom/kubernetes$ sudo make all
/oom/kubernetes$ sudo make onap
/oom/kubernetes$ sudo helm install local/onap -n onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set log.enabled=false
/oom/kubernetes$ sudo helm upgrade -i onap local/onap --namespace onap -f onap/resources/environments/disable-allcharts.yaml --set log.enabled=true

# install logdemo
/logging-analytics/reference/logging-kubernetes$ sudo helm delete --purge logdemonode
/logging-analytics/reference/logging-kubernetes$ sudo make all
/logging-analytics/reference/logging-kubernetes$ sudo make logdemonode
/logging-analytics/reference/logging-kubernetes$ sudo helm install local/logdemonode -n logdemonode --namespace onap --set logdemonode.enabled=true

# rebuild after code change
/logging-analytics/reference/logging-kubernetes$ sudo helm upgrade -i logdemonode local/logdemonode --namespace onap --set logdemonode.enabled=false

# results
onap   logdemonode-logdemonode-5c8bffb468-rx2br   2/2   Running   0   1m
onap   onap-log-elasticsearch-7557486bc4-9h7gf    1/1   Running   0   40m
onap   onap-log-kibana-fc88b6b79-rkpzx            1/1   Running   0   40m
onap   onap-log-logstash-fpzc5                    1/1   Running   0   40m

onap   log-es        NodePort    10.43.17.89     <none>   9200:30254/TCP   39m
onap   log-es-tcp    ClusterIP   10.43.120.133   <none>   9300/TCP         39m
onap   log-kibana    NodePort    10.43.73.68     <none>   5601:30253/TCP   39m
onap   log-ls        NodePort    10.43.107.55    <none>   5044:30255/TCP   39m
onap   log-ls-http   ClusterIP   10.43.48.177    <none>   9600/TCP         39m
onap   logdemonode   NodePort    10.43.0.35      <none>   8080:30453/TCP   55s

ubuntu@ip-172-31-54-73:~$ curl http://dev.onap.info:30453/logging-demo/rest/health/health
true

# check records in elasticsearch
ubuntu@ip-172-31-54-73:~$ curl http://dev.onap.info:30254/_search?q=*
{"took":3,"timed_out":false,"_shards":{"total":21,"successful":21,"failed":0},"hits":{"total":2385953,"max_score":1.0,"hits":[{"_index":".kibana","_type":"index-pattern","_id":"logstash-*","_score":1.0,"_source":{"title":"logstash-*","timeFieldName":"@timestamp","notExpandable":true,"fields":"[{\"name\":\"@timestamp\",\"type\":\"date\",\"count\":0,\

# note: if PVs are left over (a helm 2.9.1 issue after an upgrade from Kubernetes 1.8 to 1.10 via Rancher 1.6.18), also delete the namespace
sudo helm delete --purge onap
kubectl delete namespace onap
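The pod and service listings above come from kubectl on the Rancher/Kubernetes host, and the two curl calls hit the demo service and Elasticsearch NodePorts. A minimal verification sketch, assuming the same onap namespace, the dev.onap.info hostname, and the NodePorts shown above (these values will differ per install):

```
# verify the demo pod and the onap-log pods are Running
kubectl get pods -n onap | grep -E 'logdemonode|onap-log'
# confirm the NodePorts exposed by the log stack and the demo service
kubectl get services -n onap | grep -E 'log-|logdemonode'
# exercise the demo REST endpoint a few times to generate log entries
for i in $(seq 1 5); do curl -s http://dev.onap.info:30453/logging-demo/rest/health/health; echo; done
# count only logstash-indexed documents instead of searching every index
curl -s 'http://dev.onap.info:30254/logstash-*/_count?q=*'
```

If the logstash count stays at zero while the health endpoint returns true, check the filebeat sidecar in the logdemonode pod (the 2/2 container count above) before looking at logstash itself.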
...