VMware Workstation includes a network configuration editor; VMware Fusion requires NAT port forwarding rules to be set manually.

...


Configuration


NAT config
VMware Fusion 8.5

$ sudo vi /Library/Preferences/VMware\ Fusion/vmnet8/nat.conf

[incomingtcp]


# Use these with care - anyone can enter into your VM through these...

# The format and example are as follows:

#<external port number> = <VM's IP address>:<VM's port number>

#8080 = 172.16.3.128:80


8880 = 192.168.241.134:8880


obrienbiometrics:onap michaelobrien$ sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop

Stopped DHCP service on vmnet1

Disabled hostonly virtual adapter on vmnet1

Stopped DHCP service on vmnet8

Stopped NAT service on vmnet8

Disabled hostonly virtual adapter on vmnet8

Stopped all configured services on all networks

obrienbiometrics:onap michaelobrien$ sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start

Enabled hostonly virtual adapter on vmnet1

Started DHCP service on vmnet1

Started NAT service on vmnet8

Enabled hostonly virtual adapter on vmnet8

Started DHCP service on vmnet8

Started all configured services on all networks

$ curl 127.0.0.1:8880

{"id":"v1","type":"apiVersion","links":{"accounts":"http:\/\/127.0.0.1:8880\/v1\/accounts","agents":"http:\/\/127.0.0.1:8880\/v1\/agents","apiKeys":"http:\/\/127.0.0.1:8880\/



Bare RHEL 7.3 VM - Multi Node Cluster

In progress as of 20170701

https://kubernetes.io/docs/getting-started-guides/scratch/

https://github.com/kubernetes/kubernetes/releases/latest

https://github.com/kubernetes/kubernetes/releases/tag/v1.7.0

https://github.com/kubernetes/kubernetes/releases/download/v1.7.0/kubernetes.tar.gz
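
The v1.7.0 release tarball listed above can be pulled down directly, for example (a sketch - wget works just as well):

curl -LO https://github.com/kubernetes/kubernetes/releases/download/v1.7.0/kubernetes.tar.gz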

tar -xvf kubernetes.tar.gz

Optional: build from source
cd kubernetes/

vi Vagrantfile
cat README.md
ls client/
git clone https://github.com/kubernetes/kubernetes
systemctl start docker
docker ps
cd kubernetes/
make quick-release

Or go directly to the prebuilt binaries

/run/media/root/sec/onap_kub/kubernetes/cluster

./get-kube-binaries.sh

export PATH=/run/media/root/sec/onap_kub/kubernetes/client/bin:$PATH
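
A quick sanity check that the client binaries are being picked up from the extracted tree (assumes the export above succeeded):

which kubectl
kubectl version --client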

[root@obrien-b2 server]# pwd

/run/media/root/sec/onap_kub/kubernetes/server

kubernetes-manifests.tar.gz  kubernetes-salt.tar.gz  kubernetes-server-linux-amd64.tar.gz  README

tar -xvf kubernetes-server-linux-amd64.tar.gz

/run/media/root/sec/onap_kub/kubernetes/server/kubernetes/server/bin


Build images

[root@obrien-b2 etcd]# make  


[root@obrien-b2 etcd]# pwd
/run/media/root/sec/onap_kub/kubernetes/cluster/images/etcd

    /go/src/github.com/golang/glog (from $GOPATH)
src/k8s.io/kubernetes/cluster/images/etcd/attachlease/attachlease.go:26:2: cannot find package "golang.org/x/net/context" in any of:
    /usr/local/go/src/golang.org/x/net/context (from $GOROOT)
    /go/src/golang.org/x/net/context (from $GOPATH)


(Go is required for the build - follow the golang.org install docs below)

https://golang.org/doc/install?download=go1.8.3.linux-amd64.tar.gz
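
A minimal install sketch for the tarball above (standard Go install steps; the GOPATH location is an assumption - adjust to taste):

tar -C /usr/local -xzf go1.8.3.linux-amd64.tar.gz
export PATH=$PATH:/usr/local/go/bin
export GOPATH=/root/go
export PATH=$PATH:$GOPATH/bin
go version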



CoreOS on Vagrant on RHEL/OSX

(Yves alerted me to this) - currently blocked by the 19 GB VM size (resizing the VM's disk is unsupported in the VirtualBox driver)

https://coreos.com/kubernetes/docs/latest/kubernetes-on-vagrant-single.html

Implement OSX fix for Vagrant 1.9.6 https://github.com/mitchellh/vagrant/issues/7747


Adjust the Vagrantfile for your system

NODE_VCPUS = 1

NODE_MEMORY_SIZE = 2048


to (for example, a Core i7-5820K with 64 GB of RAM)

NODE_VCPUS = 8

NODE_MEMORY_SIZE = 32768
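
If you prefer not to edit by hand, something like the following applies the same change (a sketch - variable names taken from the single-node Vagrantfile above; verify with a diff afterwards):

sed -i.bak -e 's/NODE_VCPUS = 1/NODE_VCPUS = 8/' -e 's/NODE_MEMORY_SIZE = 2048/NODE_MEMORY_SIZE = 32768/' Vagrantfile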

curl -O https://storage.googleapis.com/kubernetes-release/release/v1.6.1/bin/darwin/amd64/kubectl

chmod +x kubectl

skipped (mv kubectl /usr/local/bin/kubectl) - already there

ls /usr/local/bin/kubectl 

git clone https://github.com/coreos/coreos-kubernetes.git

cd coreos-kubernetes/single-node/

vagrant box update

sudo ln -sf /usr/local/bin/openssl /opt/vagrant/embedded/bin/openssl

vagrant up

Wait at least 5 min (Yves is good)

(rerun from here)

export KUBECONFIG="${KUBECONFIG}:$(pwd)/kubeconfig"

kubectl config use-context vagrant-single

obrienbiometrics:single-node michaelobrien$ export KUBECONFIG="${KUBECONFIG}:$(pwd)/kubeconfig"

obrienbiometrics:single-node michaelobrien$ kubectl config use-context vagrant-single

Switched to context "vagrant-single".

obrienbiometrics:single-node michaelobrien$ kubectl proxy &

[1] 4079

obrienbiometrics:single-node michaelobrien$ Starting to serve on 127.0.0.1:8001

Go to

http://localhost:8001/ui


$ kubectl get nodes

$ kubectl get service --all-namespaces

$ kubectl cluster-info

git clone ssh://michaelobrien@gerrit.onap.org:29418/oom

cd oom/kubernetes/oneclick/

obrienbiometrics:oneclick michaelobrien$ ./createAll.bash -n onap

**** Done ****
obrienbiometrics:oneclick michaelobrien$ kubectl get service --all-namespaces

...

onap-vid              vid-server             10.3.0.31    <nodes>       8080:30200/TCP                                                               32s

obrienbiometrics:oneclick michaelobrien$ kubectl get pods --all-namespaces

NAMESPACE             NAME                                    READY     STATUS              RESTARTS   AGE

kube-system           heapster-v1.2.0-4088228293-3k7j1        2/2       Running             2          4h

kube-system           kube-apiserver-172.17.4.99              1/1       Running             1          4h

kube-system           kube-controller-manager-172.17.4.99     1/1       Running             1          4h

kube-system           kube-dns-782804071-jg3nl                4/4       Running             4          4h

kube-system           kube-dns-autoscaler-2715466192-k45qg    1/1       Running             1          4h

kube-system           kube-proxy-172.17.4.99                  1/1       Running             1          4h

kube-system           kube-scheduler-172.17.4.99              1/1       Running             1          4h

kube-system           kubernetes-dashboard-3543765157-qtnnj   1/1       Running             1          4h

onap-aai              aai-service-346921785-w3r22             0/1       Init:0/1            0          1m

...

reset

obrienbiometrics:single-node michaelobrien$ rm -rf ~/.vagrant.d/boxes/coreos-alpha/
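
A fuller reset (a sketch - run from coreos-kubernetes/single-node; this destroys the VM state as well as the cached box):

vagrant destroy -f
rm -rf ~/.vagrant.d/boxes/coreos-alpha/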


OSX Minikube

curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl

 chmod +x ./kubectl 

sudo mv ./kubectl /usr/local/bin/kubectl

kubectl cluster-info

kubectl completion -h

brew install bash-completion

curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.19.0/minikube-darwin-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/

minikube start --vm-driver=vmwarefusion

kubectl run hello-minikube --image=gcr.io/google_containers/echoserver:1.4 --port=8080

kubectl expose deployment hello-minikube --type=NodePort

kubectl get pod

curl $(minikube service hello-minikube --url)

minikube stop

When upgrading from 0.19 to 0.20, run a minikube delete first.
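
A sketch of that upgrade path (the 0.20.0 binary URL is assumed to follow the same pattern as the 0.19.0 download above):

minikube stop
minikube delete
curl -Lo minikube https://storage.googleapis.com/minikube/releases/v0.20.0/minikube-darwin-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/
minikube start --vm-driver=vmwarefusion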

RHEL Kubernetes - Red Hat Enterprise Linux 7.3 Host

Running ONAP Kubernetes services in a single VM using the Red Hat Kubernetes packages for RHEL 7.3.

Red Hat provides 2 Docker containers for the scheduler and API server components and spins up 2 (the number is scalable) pod-infrastructure containers for use by ONAP.

:8880

[root@obrien-mbp oneclick]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
ee02bbab2037 rhel7/kubernetes-scheduler "/usr/bin/kube-schedu" 40 hours ago Up 40 hours k8s_kube-scheduler.4e069b78_kube-scheduler-127.0.0.1_default_78147ee23cd674839c926daaa58595e5_f4ada53e
f5031b2923ca rhel7/kubernetes-apiserver "/usr/bin/kube-apiser" 40 hours ago Up 40 hours k8s_kube-apiserver.41e368d_kube-apiserver-127.0.0.1_default_ab6617fd8366917b3d6b8c7bb6cbcfcf_8d671f6c
4c5e96ea1074 registry.access.redhat.com/rhel7/pod-infrastructure:latest "/pod" 40 hours ago Up 40 hours k8s_POD.ae8ee9ac_kube-scheduler-127.0.0.1_default_78147ee23cd674839c926daaa58595e5_0ce93fa0
3316c73036fc registry.access.redhat.com/rhel7/pod-infrastructure:latest "/pod" 40 hours ago Up 40 hours k8s_POD.ae8ee9ac_kube-apiserver-127.0.0.1_default_ab6617fd8366917b3d6b8c7bb6cbcfcf_8c0dda0f



Kubernetes setup

Uninstall docker-ce (we installed it earlier)

Follow https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux_atomic_host/7/html-single/getting_started_with_kubernetes/

subscription-manager repos --enable=rhel-7-server-optional-rpms
sudo yum remove docker-ce
sudo yum remove docker-ce-selinux
yum install docker kubernetes-client kubernetes-node etcd
docker ps
systemctl disable firewalld
systemctl stop firewalld
yum install docker-distribution
systemctl start docker-distribution
systemctl enable docker-distribution
systemctl is-active docker-distribution
docker images
systemctl start docker
docker ps
docker images
docker pull registry.access.redhat.com/rhel7/kubernetes-apiserver
docker pull registry.access.redhat.com/rhel7/kubernetes-controller-mgr
docker pull registry.access.redhat.com/rhel7/kubernetes-scheduler
mkdir /etc/kubernetes/manifests
vi /etc/kubernetes/manifests/apiserver-pod.json
vi /etc/kubernetes/manifests/controller-mgr-pod.json
vi /etc/kubernetes/manifests/scheduler-pod.json
vi /etc/kubernetes/kubelet 
vi kubestart.sh
chmod 777 kubestart.sh
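
The contents of kubestart.sh are not reproduced here; a minimal sketch in the spirit of the Red Hat getting-started guide (the service set is an assumption - it simply restarts and enables the node services so the kubelet picks up the manifests created above) could be:

#!/bin/bash
# Restart and enable the Kubernetes node services; the kubelet then launches
# the static pods defined in /etc/kubernetes/manifests (service names assumed).
for SERVICE in etcd docker kube-proxy.service kubelet.service; do
    systemctl restart $SERVICE
    systemctl enable $SERVICE
    systemctl status $SERVICE -l
done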

[root@obrien-mbp opt]# ./kubestart.sh 

[root@obrien-mbp opt]# ss -tulnp | grep -E "(kube)|(etcd)"


[root@obrien-mbp opt]# curl -s -L http://localhost:2379/version
{"etcdserver":"3.1.7","etcdcluster":"3.1.0"}[root@obrien-mbp opt]#