This page details the Rancher RI installation independently of the deployment target (Physical, OpenStack, AWS, Azure, GCE)
Prerequisites
The supported versions are as follows:
ONAP Release | Rancher | Kubernetes | Helm | Kubectl | Docker |
---|---|---|---|---|---|
Amsterdam | 1.6.10 | 1.7.7 | 2.3.0 | 1.7.7 | 1.12.x |
Beijing | 1.6.14 | 1.8.6 | 2.6.1+ | 1.8.6 | 17.03-ce |
Amsterdam Branch Installation
Rancher 1.6.10
Beijing Branch Installation
Rancher 1.6.14
Experimental Installation
Rancher 2.0
#!/bin/bash
# Experimental single-node Rancher 2.0 (preview) install for ONAP.
# Installs Docker, starts the rancher/server:preview container, drives the
# Rancher v3 REST API (login, password change, API key, cluster creation,
# node registration), installs kubectl + helm, and generates a kubeconfig
# ("config" in the current directory) for the new cluster.
# Based on https://rancher.com/blog/
set -euo pipefail

SERVER=amsterdam.onap.info
CLUSTERNAME=yournewcluster
NEWPASSWORD=thisisyournewpassword

# Rancher-supported Docker version for this release.
curl https://releases.rancher.com/install-docker/17.03.sh | sh
apt install jq -y

#docker run -d -p 80:80 -p 443:443 --name rancher-server rancher/server:preview --http-only
docker run -d -p 80:80 -p 443:443 --name rancher-server rancher/server:preview

# Wait for the Rancher API to come up before driving it.
while ! curl -k https://localhost/ping; do sleep 3; done

# Login with the default admin/admin credentials.
LOGINRESPONSE=$(curl -s 'https://127.0.0.1/v3-public/localProviders/local?action=login' \
  -H 'content-type: application/json' \
  --data-binary '{"username":"admin","password":"admin"}' --insecure)
LOGINTOKEN=$(echo "$LOGINRESPONSE" | jq -r .token)
echo "LOGINTOKEN: $LOGINTOKEN"

# Change password.
# FIX: use $NEWPASSWORD here — the original hardcoded the password a second
# time, so editing the variable at the top had no effect on this call.
curl -s 'https://127.0.0.1/v3/users?action=changepassword' \
  -H 'content-type: application/json' \
  -H "Authorization: Bearer $LOGINTOKEN" \
  --data-binary '{"currentPassword":"admin","newPassword":"'"$NEWPASSWORD"'"}' --insecure

# Create API key.
APIRESPONSE=$(curl -s 'https://127.0.0.1/v3/token' \
  -H 'content-type: application/json' \
  -H "Authorization: Bearer $LOGINTOKEN" \
  --data-binary '{"type":"token","description":"automation"}' --insecure)

# Extract and store the (long-lived) API token.
APITOKEN=$(echo "$APIRESPONSE" | jq -r .token)
echo "APITOKEN: $APITOKEN"

# Create cluster.
CLUSTERRESPONSE=$(curl -s 'https://127.0.0.1/v3/cluster' \
  -H 'content-type: application/json' \
  -H "Authorization: Bearer $APITOKEN" \
  --data-binary '{"type":"cluster","nodes":[],"rancherKubernetesEngineConfig":{"ignoreDockerVersion":true},"name":"'"$CLUSTERNAME"'"}' \
  --insecure)
echo "CLUSTERRESPONSE: $CLUSTERRESPONSE"

# Extract clusterid to use for generating the docker run command.
CLUSTERID=$(echo "$CLUSTERRESPONSE" | jq -r .id)
echo "CLUSTERID: $CLUSTERID"   # FIX: label typo was "CLUSTERRID"

# Generate the agent docker run command.
AGENTIMAGE=$(curl -s -H "Authorization: Bearer $APITOKEN" \
  https://127.0.0.1/v3/settings/agent-image --insecure | jq -r .value)
ROLEFLAGS="--etcd --controlplane --worker"
RANCHERSERVER="https://$SERVER"

# Generate registration token (clusterRegistrationToken).
AGENTTOKEN=$(curl -s 'https://127.0.0.1/v3/clusterregistrationtoken' \
  -H 'content-type: application/json' \
  -H "Authorization: Bearer $APITOKEN" \
  --data-binary '{"type":"clusterRegistrationToken","clusterId":"'"$CLUSTERID"'"}' \
  --insecure | jq -r .token)
echo "AGENTTOKEN: $AGENTTOKEN"

# Retrieve the CA certificate ONCE (the original fetched it twice), then
# derive the agent checksum and the base64 form for the kubeconfig.
CERT=$(curl -s -H "Authorization: Bearer $APITOKEN" \
  https://127.0.0.1/v3/settings/cacerts --insecure | jq -r .value)
CACHECKSUM=$(echo "$CERT" | sha256sum | awk '{ print $1 }')
# NOTE(review): base64 -w0 is GNU coreutils (single line, as kubeconfig
# requires); on BSD/macOS use "base64 | tr -d '\n'" instead.
CACERTB64=$(echo "$CERT" | base64 -w0)

# Assemble and run the agent docker command on this node.
AGENTCOMMAND="docker run -d --restart=unless-stopped -v /var/run/docker.sock:/var/run/docker.sock --net=host $AGENTIMAGE $ROLEFLAGS --server $RANCHERSERVER --token $AGENTTOKEN --ca-checksum $CACHECKSUM"
echo "AGENTCOMMAND: $AGENTCOMMAND"
$AGENTCOMMAND

# Install kubectl (version pinned per the Beijing support matrix above).
curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.8.6/bin/linux/amd64/kubectl
chmod +x ./kubectl
sudo mv ./kubectl /usr/local/bin/kubectl
mkdir -p ~/.kube   # FIX: -p so a rerun does not abort if the dir exists

# Install helm (version pinned per the support matrix above).
wget http://storage.googleapis.com/kubernetes-helm/helm-v2.6.1-linux-amd64.tar.gz
tar -zxvf helm-v2.6.1-linux-amd64.tar.gz
sudo mv linux-amd64/helm /usr/local/bin/helm

# Wait for cluster.
#read -p "wait for cluster up before generating .kube/config"

# Generate the kubeconfig.
# FIX: embed THIS install's CA ($CACERTB64) — the original pasted a
# hardcoded certificate-authority-data from a different install and even
# noted "certificate-authority-data is slightly different for every
# install", while the fetched $CERT went unused.
FILE="config"
/bin/cat <<EOM >"$FILE"
apiVersion: v1
kind: Config
clusters:
- name: "$CLUSTERNAME"
  cluster:
    server: "https://$SERVER/k8s/clusters/$CLUSTERID"
    api-version: v1
    certificate-authority-data: "$CACERTB64"
users:
- name: "admin"
  user:
    token: "$APITOKEN"
contexts:
- name: "$CLUSTERNAME"
  context:
    user: "admin"
    cluster: "$CLUSTERNAME"
current-context: "$CLUSTERNAME"
EOM

echo "When the cluster is ready - copy the generated 'Kubeconfig File' text to ~/.kube/config"
echo "then test using 'kubectl get pods --all-namespaces'"
#cp config ~/.kube/

# Example output once the cluster is up:
#
# kubectl get pods --all-namespaces
# NAMESPACE     NAME                                    READY  STATUS            RESTARTS  AGE
# ingress-nginx default-http-backend-66b447d9cf-t4qxx   1/1    Running           0         44m
# ingress-nginx nginx-ingress-controller-vchhb          0/1    CrashLoopBackOff  13        44m
# kube-system   kube-dns-6f7666d48c-9zmtf               3/3    Running           0         44m
# kube-system   kube-dns-autoscaler-54fd4c549b-6bm5b    1/1    Running           0         44m
# kube-system   kube-flannel-jgx9x                      2/2    Running           0         44m
Result
root@ip-172-31-84-230:~# docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 66e823e8ebb8 gcr.io/google_containers/defaultbackend@sha256:865b0c35e6da393b8e80b7e3799f777572399a4cff047eb02a81fa6e7a48ed4b "/server" 3 minutes ago Up 3 minutes k8s_default-http-backend_default-http-backend-66b447d9cf-t4qxx_ingress-nginx_54afe3f8-1455-11e8-b142-169c5ae1104e_0 7c9a6eeeb557 rancher/k8s-dns-sidecar-amd64@sha256:4581bf85bd1acf6120256bb5923ec209c0a8cfb0cbe68e2c2397b30a30f3d98c "/sidecar --v=2 --..." 3 minutes ago Up 3 minutes k8s_sidecar_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0 72487327e65b rancher/pause-amd64:3.0 "/pause" 3 minutes ago Up 3 minutes k8s_POD_default-http-backend-66b447d9cf-t4qxx_ingress-nginx_54afe3f8-1455-11e8-b142-169c5ae1104e_0 d824193e7404 rancher/k8s-dns-dnsmasq-nanny-amd64@sha256:bd1764fed413eea950842c951f266fae84723c0894d402a3c86f56cc89124b1d "/dnsmasq-nanny -v..." 3 minutes ago Up 3 minutes k8s_dnsmasq_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0 89bdd61a99a3 rancher/k8s-dns-kube-dns-amd64@sha256:9c7906c0222ad6541d24a18a0faf3b920ddf66136f45acd2788e1a2612e62331 "/kube-dns --domai..." 3 minutes ago Up 3 minutes k8s_kubedns_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0 7c17fc57aef9 rancher/cluster-proportional-autoscaler-amd64@sha256:77d2544c9dfcdfcf23fa2fcf4351b43bf3a124c54f2da1f7d611ac54669e3336 "/cluster-proporti..." 
3 minutes ago Up 3 minutes k8s_autoscaler_kube-dns-autoscaler-54fd4c549b-6bm5b_kube-system_51afa75f-1455-11e8-b142-169c5ae1104e_0 024269154b8b rancher/pause-amd64:3.0 "/pause" 3 minutes ago Up 3 minutes k8s_POD_kube-dns-6f7666d48c-9zmtf_kube-system_51b35ec8-1455-11e8-b142-169c5ae1104e_0 48e039d15a90 rancher/pause-amd64:3.0 "/pause" 3 minutes ago Up 3 minutes k8s_POD_kube-dns-autoscaler-54fd4c549b-6bm5b_kube-system_51afa75f-1455-11e8-b142-169c5ae1104e_0 13bec6fda756 rancher/pause-amd64:3.0 "/pause" 3 minutes ago Up 3 minutes k8s_POD_nginx-ingress-controller-vchhb_ingress-nginx_54aede27-1455-11e8-b142-169c5ae1104e_0 332073b160c9 rancher/coreos-flannel-cni@sha256:3cf93562b936004cbe13ed7d22d1b13a273ac2b5092f87264eb77ac9c009e47f "/install-cni.sh" 3 minutes ago Up 3 minutes k8s_install-cni_kube-flannel-jgx9x_kube-system_4fb9b39b-1455-11e8-b142-169c5ae1104e_0 79ef0da922c5 rancher/coreos-flannel@sha256:93952a105b4576e8f09ab8c4e00483131b862c24180b0b7d342fb360bbe44f3d "/opt/bin/flanneld..." 3 minutes ago Up 3 minutes k8s_kube-flannel_kube-flannel-jgx9x_kube-system_4fb9b39b-1455-11e8-b142-169c5ae1104e_0 300eab7db4bc rancher/pause-amd64:3.0 "/pause" 3 minutes ago Up 3 minutes k8s_POD_kube-flannel-jgx9x_kube-system_4fb9b39b-1455-11e8-b142-169c5ae1104e_0 1597f8ba9087 rancher/k8s:v1.8.7-rancher1-1 "/opt/rke/entrypoi..." 3 minutes ago Up 3 minutes kube-proxy 523034c75c0e rancher/k8s:v1.8.7-rancher1-1 "/opt/rke/entrypoi..." 4 minutes ago Up 4 minutes kubelet 788d572d313e rancher/k8s:v1.8.7-rancher1-1 "/opt/rke/entrypoi..." 4 minutes ago Up 4 minutes scheduler 9e520f4e5b01 rancher/k8s:v1.8.7-rancher1-1 "/opt/rke/entrypoi..." 4 minutes ago Up 4 minutes kube-controller 29bdb59c9164 rancher/k8s:v1.8.7-rancher1-1 "/opt/rke/entrypoi..." 4 minutes ago Up 4 minutes kube-api 2686cc1c904a rancher/coreos-etcd:v3.0.17 "/usr/local/bin/et..." 4 minutes ago Up 4 minutes etcd a1fccc20c8e7 rancher/agent:v2.0.2 "run.sh --etcd --c..." 
5 minutes ago Up 5 minutes unruffled_pike 6b01cf361a52 rancher/server:preview "rancher --k8s-mod..." 5 minutes ago Up 5 minutes 0.0.0.0:80->80/tcp, 0.0.0.0:443->443/tcp rancher-server