From d06457ff14fc994305a36dd08cf27244fc8c9319 Mon Sep 17 00:00:00 2001
From: githubfoam
Date: Sat, 4 Jul 2020 17:09:39 +0300
Subject: [PATCH 01/32] =?UTF-8?q?=D0=BF=D0=BE=D1=81=D0=B0=D0=B4=D0=BA?=
 =?UTF-8?q?=D0=B8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 .travis.yml           | 37 ++++++++++++++++++++++++++++++-------
 Makefile              |  2 ++
 app/deploy-kubesec.sh | 34 ++++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 7 deletions(-)
 create mode 100644 app/deploy-kubesec.sh

diff --git a/.travis.yml b/.travis.yml
index 3806e62..8bd9223 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -104,7 +104,7 @@ matrix:
   #   after_success:
   #     - deactivate
-
+
   # - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK
   #   dist: bionic
   #   arch: amd64
   #   addons:
   #     snaps:
   #       - name: kubectl
   #         confinement: classic # or devmode
   #         channel: latest/stable # will be passed to --channel flag
   #       - name: helm
   #         confinement: classic # or devmode
   #         channel: latest/stable # will be passed to --channel flag
   #   language: python
   #   python: 3.7
   #   before_install:
   #     - pip3 install virtualenv
   #     - virtualenv -p $(which python3) ~venvpy3
   #     - source ~venvpy3/bin/activate
   #     - pip install -r requirements.txt
   #   script:
   #     - sudo make deploy-kind
   #     - sudo kind create cluster --config=app/kind-config.yaml
   #     - sudo kubectl cluster-info --context kind-kind
   #     - sudo make deploy-microservices
   #   after_success:
   #     - deactivate

@@ -131,7 +131,34 @@ matrix:
-  - name: "istio service mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK
+  # - name: "istio service mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK
+  #   dist: bionic
+  #   arch: amd64
+  #   addons:
+  #     snaps:
+  #       - name: kubectl
+  #         confinement: classic # or devmode
+  #         channel: latest/stable # will be passed to --channel flag
+  #       - name: helm
+  #         confinement: classic # or devmode
+  #         channel: latest/stable # will be passed to --channel flag
+  #   language: python
+  #   python: 3.7
+  #   before_install:
+  #     - pip3 install virtualenv
+  #     - virtualenv -p $(which python3) ~venvpy3
+  #     - source ~venvpy3/bin/activate
+  #     - pip install -r requirements.txt
+  #   script:
+  #     - sudo make deploy-kind
+  #     - sudo kind create cluster --config=app/kind-config.yaml
+  #     - sudo kubectl cluster-info --context kind-kind
+  #     - sudo make deploy-istio
+  #     # - sudo kind delete cluster --name cilium-testing
+  #   after_success:
+  #     - deactivate
+
+  - name: "kubesec w snapped kubectl helm Python 3.7 on bionic" #OK
     dist: bionic
     arch: amd64
     addons:
       snaps:
         - name: kubectl
           confinement: classic # or devmode
           channel: latest/stable # will be passed to --channel flag
         - name: helm
           confinement: classic # or devmode
           channel: latest/stable # will be passed to --channel flag
     language: python
     python: 3.7
     before_install:
       - pip3 install virtualenv
       - virtualenv -p $(which python3) ~venvpy3
       - source ~venvpy3/bin/activate
       - pip install -r requirements.txt
     script:
@@ -150,11 +177,7 @@ matrix:
-      - sudo make deploy-kind
-      - sudo kind create cluster --config=app/kind-config.yaml
-      - sudo kubectl cluster-info --context kind-kind
-      - sudo make deploy-istio
-      # - sudo kind delete cluster --name cilium-testing
+      - sudo make deploy-kubesec
     after_success:
       - deactivate

diff --git a/Makefile b/Makefile
index 52fa1d7..ad00e0b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,8 @@
 IMAGE := alpine/fio
 APP:="app/deploy-openesb.sh"
+deploy-kubesec:
+	bash app/deploy-kubesec.sh
 deploy-kind:
 	bash deploy-kind.sh
 deploy-microservices:

diff --git a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh
new file mode 100644
index 0000000..c744cf2
--- /dev/null
+++ b/app/deploy-kubesec.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+echo "=============================deploy kind============================================================="
+docker version
+export KIND_VERSION="0.8.1"
+curl -Lo ./kind https://kind.sigs.k8s.io/dl/v$KIND_VERSION/kind-$(uname)-amd64
+chmod +x ./kind
+mv ./kind /usr/local/bin/kind
+kind get clusters #see the list of kind clusters
+kind get clusters
+kubectl config get-contexts #kind is prefixed to the context and cluster names, for example: kind-istio-testing
+echo "=============================kubesec============================================================="
+#https://github.com/controlplaneio/kubesec
+go get -u github.com/controlplaneio/kubesec/cmd/kubesec
+
+#Command line usage
+cat <<EOF > kubesec-test.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+  name: kubesec-demo
+spec:
+  containers:
+  - name: kubesec-demo
image: gcr.io/google-samples/node-hello:1.0 + securityContext: + readOnlyRootFilesystem: true +EOF +kubesec scan kubesec-test.yaml + +#Docker usage +docker run -i kubesec/kubesec:512c5e0 scan /dev/stdin < kubesec-test.yaml + +# Kubesec HTTP Server +kubesec http 8080 & From 57f145a7fc37fe8c76edf673c262e11a53e7921a Mon Sep 17 00:00:00 2001 From: githubfoam Date: Sat, 4 Jul 2020 20:56:59 +0300 Subject: [PATCH 02/32] openfaas init --- .travis.yml | 29 ++++++++- Makefile | 16 +++++ app/deploy-openfaas.sh | 145 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 app/deploy-openfaas.sh diff --git a/.travis.yml b/.travis.yml index 8bd9223..a6ba229 100644 --- a/.travis.yml +++ b/.travis.yml @@ -158,6 +158,33 @@ matrix: # after_success: # - deactivate + - name: "openfaas w snapped kubectl helm Python 3.7 on bionic" #OK + dist: bionic + arch: amd64 + addons: + snaps: + - name: kubectl + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + - name: helm + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + language: python + python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r requirements.txt + script: + - sudo make deploy-kind + - sudo kind create cluster --name openfaas-testing + - sudo kubectl config use-context kind-openfaas-testing + - sudo make deploy-openfaas + - sudo kind delete cluster --name openfaas-testing + after_success: + - deactivate + - name: "kubesec w snapped kubectl helm Python 3.7 on bionic" #OK dist: bionic arch: amd64 @@ -177,7 +204,7 @@ matrix: - source ~venvpy3/bin/activate - pip install -r requirements.txt script: - - sudo make deploy-kubesec + - sudo make deploy-kubesec after_success: - deactivate diff --git a/Makefile b/Makefile index ad00e0b..7a0592a 100644 --- a/Makefile +++ b/Makefile @@ -1,32 +1,48 @@ IMAGE := alpine/fio APP:="app/deploy-openesb.sh" +deploy-openfaas: + bash app/deploy-openfaas.sh + deploy-kubesec: bash app/deploy-kubesec.sh + deploy-kind: bash deploy-kind.sh + deploy-microservices: bash app/deploy-microservices.sh + deploy-cilium-cluster-mesh: bash app/deploy-cilium-cluster-mesh.sh + deploy-cilium-hubble-dist: bash app/deploy-cilium-hubble-dist.sh + deploy-cilium-hubble-local: bash app/deploy-cilium-hubble-local.sh + deploy-cilium: bash app/deploy-cilium.sh + deploy-kubeflow: bash app/deploy-kubeflow.sh + deploy-openesb: bash app/deploy-openesb.sh + deploy-weavescope: bash app/deploy-weavescope.sh + deploy-istio: bash app/deploy-istio.sh + deploy-dashboard: bash app/deploy-dashboard.sh + deploy-dashboard-helm: bash app/deploy-dashboard-helm.sh + push-image: docker push $(IMAGE) .PHONY: deploy-kind deploy-openesb deploy-dashboard deploy-dashboard-helm deploy-istio push-image diff --git a/app/deploy-openfaas.sh b/app/deploy-openfaas.sh new file mode 100644 index 0000000..e8e98d4 --- /dev/null +++ b/app/deploy-openfaas.sh @@ -0,0 +1,145 @@ +#!/bin/bash +set -eox pipefail #safety for script + +# https://itnext.io/deploy-your-first-serverless-function-to-kubernetes-232307f7b0a9 +echo "============================OpenFaaS ==============================================================" +`curl -sSLf https://cli.openfaas.com | sh` #install the OpenFaaS CLI +`curl -sSLf https://dl.get-arkade.dev | sh` #install arkade + +arkade install openfaas #use arkade to install OpenFaaS +arkade info openfaas + +# Forward the gateway 
to your machine +kubectl rollout status -n openfaas deploy/gateway +kubectl port-forward -n openfaas svc/gateway 8080:8080 & + +# Now log in using the CLI +PASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode; echo) +echo -n $PASSWORD | faas-cli login --username admin --password-stdin + +faas-cli store deploy nodeinfo +# Check to see "Ready" status +faas-cli describe nodeinfo# Invoke +echo | faas-cli invoke nodeinfo +echo | faas-cli invoke nodeinfo --async + +# curl http://localhost:8080 +# Get the password so you can open the UI +echo $PASSWORD + +# faas-cli template store list +# Sign up for a Docker Hub account, so that you can store your functions for free. +# export OPENFAAS_PREFIX="DOCKER_HUB_USERNAME" +export OPENFAAS_PREFIX=$DOCKER_USERNAME #travisci env var +faas-cli new --lang python3 serverless + + + + # - echo "=============================Inspection=============================================================" + # - kubectl get pod -o wide #The IP column will contain the internal cluster IP address for each pod. + # - kubectl get service --all-namespaces # find a Service IP,list all services in all namespaces + # - echo "=============================openEBS=============================================================" + # - pushd $(pwd) && cd app + # - sudo kubectl apply -f https://openebs.github.io/charts/openebs-operator.yaml #install OpenEBS + # - kubectl get service --all-namespaces # find a Service IP,list all services in all namespaces + # - kubectl get pods -n openebs -l openebs.io/component-name=openebs-localpv-provisioner #Observe localhost provisioner pod + # - kubectl get sc #Check the storage Class + # # openesb component list + # #https://github.com/openebs/openebs/blob/master/k8s/openebs-operator.yaml + # - | + # echo "Waiting for openebs-localpv-provisioner component to be ready ..." + # for i in {1..60}; do # Timeout after 5 minutes, 150x5=300 secs + # if sudo kubectl get pods --namespace=openebs -l openebs.io/component-name=openebs-localpv-provisioner | grep Running ; then + # break + # fi + # sleep 5 + # done + # - | + # echo "Waiting for maya-apiserver component to be ready ..." + # for i in {1..60}; do # Timeout after 5 minutes, 150x5=300 secs + # if sudo kubectl get pods --namespace=openebs -l openebs.io/component-name=maya-apiserver | grep Running ; then + # break + # fi + # sleep 5 + # done + # - | + # echo "Waiting for openebs-ndm-operator component to be ready ..." + # for i in {1..60}; do # Timeout after 5 minutes, 150x5=300 secs + # if sudo kubectl get pods --namespace=openebs -l openebs.io/component-name=openebs-ndm-operator | grep Running ; then + # break + # fi + # sleep 5 + # done + # - | + # echo "Waiting for openebs to be ready ..." 
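  #   (a kubectl-native alternative to these grep polling loops, sketched on the
  #    assumption that the openebs namespace and component labels above are in
  #    place; the 300s timeout is an assumption:
  #    kubectl wait --namespace=openebs --timeout=300s --for=condition=Ready pod \
  #      -l openebs.io/component-name=openebs-localpv-provisioner
  #    kubectl wait blocks until the condition holds or the timeout expires)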
+ # for i in {1..60}; do # Timeout after 2 minutes, 60x2=300 secs + # if sudo kubectl get pods --namespace=openebs | grep Running ; then + # break + # fi + # sleep 2 + # done + # - sudo kubectl get pods --all-namespaces + # - sudo kubectl get pods --namespace=openebs + # - popd + # - echo "=============================openEBS=============================================================" + # #Create a PVC #Create an Nginx Pod which consumes OpenEBS Local PV Hospath Storage #openEBS + # #HPA + # # - pushd $(pwd) && cd hpa + # # - minikube addons list + # # - sudo minikube addons enable metrics-server + # # - minikube addons list + # # - sudo kubectl get pods -n kube-system + # # - sudo kubectl logs -n kube-system deploy/metrics-server + # # - sudo kubectl get svc -n kube-system + # # # - sudo ping 10.96.56.228 -c 1 # ping metrics-server ClusterIP + # # - kubectl get pods -n kube-system -o wide + # # - ping 10.224.13.23 -c 1 # # ping metrics-server pod IP + # # - sudo systemctl status kube-apiserver -l + # # - sudo kubectl top node + # # - sudo kubectl top pod + # # - sudo kubectl describe hpa + # # - | + # # sudo minikube start \ + # # --extra-config=controller-manager.horizontal-pod-autoscaler-upscale-delay=1m \ + # # --extra-config=controller-manager.horizontal-pod-autoscaler-downscale-delay=1m \ + # # --extra-config=controller-manager.horizontal-pod-autoscaler-sync-period=10s \ + # # --extra-config=controller-manager.horizontal-pod-autoscaler-downscale-stabilization=1m + # # - sudo kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10 + # # - sudo kubectl run --generator=run-pod/v1 -it --rm load-generator --image=busybox /bin/sh #Load generator + # # - while true; do wget -q -O- http://php-apache; done + # # - kubectl get --raw /apis/metrics.k8s.io/v1beta1 + # # - kubectl get pods -n kube-system | grep metrics-server + # # - kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes | jq '.' + # # - kubectl get --raw /apis/metrics.k8s.io/v1beta1/pods | jq '.' 
+ # # - | + # # kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes \ + # # | jq '[.items [] | {nodeName: .metadata.name, nodeCpu: .usage.cpu, nodeMemory: .usage.memory}]' + # # - popd + # #HPA + # # #nfs-pv-storage + # - pushd $(pwd) && cd nfs-pv-storage + # - sudo apt install nfs-kernel-server + # - sudo mkdir -p /pv/nfs/test-volume + # - sudo chmod 777 /pv/nfs/test-volume + # - sudo systemctl status nfs-server + # - sudo systemctl stop nfs-server + # - sudo systemctl start nfs-server + # - sudo systemctl restart nfs-server + # # - sudo vi /etc/exports + # # /mnt/nfs/test-volume *(rw,sync,no_subtree_check,insecure) + # # /pv/nfs/test-volume *(rw,sync,no_subtree_check,insecure) + # # - sudo exportfs -a + # # - sudo exportfs -v + # # - sudo kubectl expose deploy nginx-deploy --port 80 --type NodePort + # # - sudo apt-get update + # # - sudo apt-get install -qqy curl + # # - curl http://localhost/ + # - popd + # # #nfs-pv-storage + # # #NGINX as a sample application + # # - sudo kubectl create deployment nginx --image=nginx + # # - sudo kubectl get deployments + # # - sudo kubectl get pods + # # - sudo kubectl get all --all-namespaces + # # #NGINX as a sample application + # From b416f53ee9190e48dc3a7c01140de3b0e31d34b2 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 13:07:05 +0300 Subject: [PATCH 03/32] =?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D0=BE=D0=B3=D0=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/deploy-istio-kind.sh | 129 ++++++++++++++++++++++++++++++++++++ app/deploy-istio.sh | 22 +++--- app/deploy-kind-gardener.sh | 47 +++++++++++++ app/kind_gardener.sh | 40 +++++++++++ 4 files changed, 227 insertions(+), 11 deletions(-) create mode 100644 app/deploy-istio-kind.sh create mode 100644 app/deploy-kind-gardener.sh create mode 100644 app/kind_gardener.sh diff --git a/app/deploy-istio-kind.sh b/app/deploy-istio-kind.sh new file mode 100644 index 0000000..e9ea597 --- /dev/null +++ b/app/deploy-istio-kind.sh @@ -0,0 +1,129 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace +# set -eox pipefail #safety for script + +#https://kind.sigs.k8s.io/docs/user/quick-start/ +#https://istio.io/docs/setup/platform-setup/kind/ +echo "=============================kind istio=============================================================" +docker version +curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64 +chmod +x ./kind +sudo mv ./kind /usr/local/bin/kind +kind get clusters #see the list of kind clusters +kind create cluster --name istio-testing #Create a cluster,By default, the cluster will be given the name kind +kind get clusters +# - sudo snap install kubectl --classic +kubectl config get-contexts #list the local Kubernetes contexts +kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl + +#https://istio.io/latest/docs/setup/getting-started/ +echo "===============================Install istio===========================================================" +#Download Istio +#/bin/sh -c 'curl -L https://istio.io/downloadIstio | sh -' #download and extract the latest release automatically (Linux or macOS) +export ISTIORELEASE="1.6" +export ISTIOVERSION="1.6.4" +/bin/sh -c 'curl -L https://istio.io/downloadIstio | ISTIO_VERSION=$ISTIOVERSION sh -' #download a specific version + +cd istio-* #Move to the Istio package directory. 
For example, if the package is istio-1.6.0 +export PATH=$PWD/bin:$PATH #Add the istioctl client to your path, The istioctl client binary in the bin/ directory. +#precheck inspects a Kubernetes cluster for Istio install requirements +istioctl experimental precheck #https://istio.io/docs/reference/commands/istioctl/#istioctl-experimental-precheck +istioctl version +istioctl manifest apply --set profile=demo #Install Istio, use the demo configuration profile +kubectl label namespace default istio-injection=enabled #Add a namespace label to instruct Istio to automatically inject Envoy sidecar proxies when you deploy your application later + +#Deploy the sample application +kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml #Deploy the Bookinfo sample application: +kubectl get service --all-namespaces #list all services in all namespace +kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it. +kubectl get pods +for i in {1..60}; do # Timeout after 5 minutes, 60x2=120 secs, 2 mins + if kubectl get pods --namespace=istio-system |grep Running ; then + break + fi + sleep 2 +done +kubectl get service --all-namespaces #list all services in all namespace + +# see if the app is running inside the cluster and serving HTML pages by checking for the page title in the response +#error: unable to upgrade connection: container not found ("ratings") +#kubectl exec $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*" +#interactive shell +#kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*" +# - | +# kubectl exec -it $(kubectl get pod \ +# -l app=ratings \ +# -o jsonpath='{.items[0].metadata.name}') \ +# -c ratings \ +# -- curl productpage:9080/productpage | grep -o ".*" Simple Bookstore App + + +#Open the application to outside traffic +#The Bookinfo application is deployed but not accessible from the outside. To make it accessible, you need to create an Istio Ingress Gateway, which maps a path to a route at the edge of your mesh. +kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml #Associate this application with the Istio gateway +istioctl analyze #Ensure that there are no issues with the configuration + +#Other platforms +#Determining the ingress IP and ports +#If the EXTERNAL-IP value is set, your environment has an external load balancer that you can use for the ingress gateway. +#If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway. +#access the gateway using the service’s node port. +kubectl get svc istio-ingressgateway -n istio-system #determine if your Kubernetes cluster is running in an environment that supports external load balancers + +#external load balancer +# #Follow these instructions if you have determined that your environment has an external load balancer. +# # If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway,access the gateway using the service’s node port. 
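# A quick way to check which case applies (a sketch; assumes the demo install in
# istio-system above) -- an empty result means there is no external load balancer,
# so fall back to the node ports set further below:
# kubectl get svc istio-ingressgateway -n istio-system -o jsonpath='{.status.loadBalancer.ingress[0].ip}'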
+# - export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +# - export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}') +# - export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}') + +# #In certain environments, the load balancer may be exposed using a host name, instead of an IP address. +# #the ingress gateway’s EXTERNAL-IP value will not be an IP address, but rather a host name + +#failed to set the INGRESS_HOST environment variable, correct the INGRESS_HOST value + export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') + +#Follow these instructions if your environment does not have an external load balancer and choose a node port instead +#Set the ingress ports: +export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}') #Set the ingress ports +export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}') #Set the ingress ports + +#INGRESS_HOST: unbound variable +export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT #Set GATEWAY_URL +# echo $GATEWAY_URL #Ensure an IP address and port were successfully assigned to the environment variable +# echo http://$GATEWAY_URL/productpage #Verify external access,retrieve the external address of the Bookinfo application +# echo $(curl http://$GATEWAY_URL/productpage) + +#View the dashboard +#istioctl dashboard kiali #optional dashboards installed by the demo installation,Access the Kiali dashboard. The default user name is admin and default password is admin +#istioctl dashboard kiali # interactive shell + + +#Uninstall +#Cleanup #https://istio.io/latest/docs/examples/bookinfo/#cleanup +#Delete the routing rules and terminate the application pods +#samples/bookinfo/platform/kube/cleanup.sh +# export ISTIORELEASE="1.6" +# export NAMESPACE="default" #error: the path "/home/travis/build/githubfoam/kind-travisci/istio-1.6.4/bookinfo.yaml" does not exist +# export NAMESPACE="istio-system" #error: the path "/home/travis/build/githubfoam/kind-travisci/istio-1.6.4/bookinfo.yaml" does not exist +# /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/istio/istio/release-$ISTIORELEASE/samples/bookinfo/platform/kube/cleanup.sh)" +#bash app/cleanup.sh #bash: app/cleanup.sh: No such file or directory +# /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" + +#Confirm shutdown +# kubectl get virtualservices --namespace=istio-system #-- there should be no virtual services +# kubectl get destinationrules --namespace=istio-system #-- there should be no destination rules +# kubectl get gateway --namespace=istio-system #-- there should be no gateway +# kubectl get pods --namespace=istio-system #-- the Bookinfo pods should be deleted + + +# #The Istio uninstall deletes the RBAC permissions and all resources hierarchically under the istio-system namespace +# #It is safe to ignore errors for non-existent resources because they may have been deleted hierarchically. +# /bin/sh -eu -xv -c 'istioctl manifest generate --set profile=demo | kubectl delete -f -' + +#The istio-system namespace is not removed by default. 
+#If no longer needed, use the following command to remove it + # kubectl delete namespace istio-system diff --git a/app/deploy-istio.sh b/app/deploy-istio.sh index e9ea597..035d0b7 100644 --- a/app/deploy-istio.sh +++ b/app/deploy-istio.sh @@ -7,17 +7,17 @@ set -o xtrace #https://kind.sigs.k8s.io/docs/user/quick-start/ #https://istio.io/docs/setup/platform-setup/kind/ -echo "=============================kind istio=============================================================" -docker version -curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64 -chmod +x ./kind -sudo mv ./kind /usr/local/bin/kind -kind get clusters #see the list of kind clusters -kind create cluster --name istio-testing #Create a cluster,By default, the cluster will be given the name kind -kind get clusters -# - sudo snap install kubectl --classic -kubectl config get-contexts #list the local Kubernetes contexts -kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl +# echo "=============================kind istio=============================================================" +# docker version +# curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64 +# chmod +x ./kind +# sudo mv ./kind /usr/local/bin/kind +# kind get clusters #see the list of kind clusters +# kind create cluster --name istio-testing #Create a cluster,By default, the cluster will be given the name kind +# kind get clusters +# # - sudo snap install kubectl --classic +# kubectl config get-contexts #list the local Kubernetes contexts +# kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl #https://istio.io/latest/docs/setup/getting-started/ echo "===============================Install istio===========================================================" diff --git a/app/deploy-kind-gardener.sh b/app/deploy-kind-gardener.sh new file mode 100644 index 0000000..f63d83f --- /dev/null +++ b/app/deploy-kind-gardener.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace + +#https://istio.io/docs/setup/platform-setup/gardener/ +#https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md +echo "=============================kind istio=============================================================" +curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64 +chmod +x ./kind + +mv ./kind /usr/local/bin/kind +kind get clusters #see the list of kind clusters +kind create cluster --name istio-testing #Create a cluster,By default, the cluster will be given the name kind +kind get clusters + +snap install kubectl --classic + +kubectl config get-contexts #list the local Kubernetes contexts +kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl +kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml #deploy Dashboard + +echo "===============================Waiting for Dashboard to be ready===========================================================" +for i in {1..150}; do # Timeout after 5 minutes, 150x2=300 secs + if kubectl get pods --namespace=kubernetes-dashboard | grep Running ; then + break + fi + sleep 2 +done + +kubectl get pod -n kubernetes-dashboard #Verify that Dashboard is deployed and running +kubectl create clusterrolebinding default-admin --clusterrole cluster-admin --serviceaccount=default:default #Create a ClusterRoleBinding to provide admin 
access to the newly created cluster + +#To login to Dashboard, you need a Bearer Token. Use the following command to store the token in a variable +token=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode) +echo $token #Display the token using the echo command and copy it to use for logging into Dashboard. +kubectl proxy & # Access Dashboard using the kubectl command-line tool by running the following command, Starting to serve on 127.0.0.1:8001 + +for i in {1..60}; do # Timeout after 1 mins, 60x1=60 secs + if nc -z -v 127.0.0.1 8001 2>&1 | grep succeeded ; then + break + fi + sleep 1 +done + +# - kind delete cluster --name istio-testing #delete the existing cluster diff --git a/app/kind_gardener.sh b/app/kind_gardener.sh new file mode 100644 index 0000000..4695f9d --- /dev/null +++ b/app/kind_gardener.sh @@ -0,0 +1,40 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace + +#https://istio.io/docs/setup/platform-setup/gardener/ +#https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md +curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64 +chmod +x ./kind +mv ./kind /usr/local/bin/kind +kind get clusters #see the list of kind clusters +kind create cluster --name istio-testing #Create a cluster,By default, the cluster will be given the name kind +kind get clusters +snap install kubectl --classic +kubectl config get-contexts #list the local Kubernetes contexts +kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl +kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml #deploy Dashboard +echo "===============================Waiting for Dashboard to be ready===========================================================" +| + for i in {1..150}; do # Timeout after 5 minutes, 150x2=300 secs + if kubectl get pods --namespace=kubernetes-dashboard | grep Running ; then + break + fi + sleep 2 + done +kubectl get pod -n kubernetes-dashboard #Verify that Dashboard is deployed and running +kubectl create clusterrolebinding default-admin --clusterrole cluster-admin --serviceaccount=default:default #Create a ClusterRoleBinding to provide admin access to the newly created cluster + #To login to Dashboard, you need a Bearer Token. Use the following command to store the token in a variable +token=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode) +echo $token #Display the token using the echo command and copy it to use for logging into Dashboard. 
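# Once kubectl proxy (started below) is serving, Dashboard is reachable through the
# API server proxy path (a sketch; the namespace and service names match the
# recommended.yaml manifest applied above):
# curl -s http://127.0.0.1:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/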
+kubectl proxy & # Access Dashboard using the kubectl command-line tool by running the following command, Starting to serve on 127.0.0.1:8001 +| + for i in {1..60}; do # Timeout after 1 mins, 60x1=60 secs + if nc -z -v 127.0.0.1 8001 2>&1 | grep succeeded ; then + break + fi + sleep 1 + done + # - kind delete cluster --name istio-testing #delete the existing cluster From f326b27627a03e245b9ab9decfaa3eed25037ab3 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 13:26:11 +0300 Subject: [PATCH 04/32] =?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D0=BE=D0=B3=D0=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 6 +++++- app/deploy-kubesec.sh | 14 +++++--------- app/deploy-openfaas.sh | 7 +++++-- 3 files changed, 15 insertions(+), 12 deletions(-) diff --git a/.travis.yml b/.travis.yml index a6ba229..0814a14 100644 --- a/.travis.yml +++ b/.travis.yml @@ -181,7 +181,7 @@ matrix: - sudo kind create cluster --name openfaas-testing - sudo kubectl config use-context kind-openfaas-testing - sudo make deploy-openfaas - - sudo kind delete cluster --name openfaas-testing + - sudo kind delete cluster --name openfaas-testing after_success: - deactivate @@ -204,7 +204,11 @@ matrix: - source ~venvpy3/bin/activate - pip install -r requirements.txt script: + - sudo make deploy-kind + - sudo kind create cluster --name kubesec-testing + - sudo kubectl config use-context kind-kubesec-testing - sudo make deploy-kubesec + - sudo kind delete cluster --name kubesec-testing after_success: - deactivate diff --git a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh index c744cf2..372b1cc 100644 --- a/app/deploy-kubesec.sh +++ b/app/deploy-kubesec.sh @@ -1,13 +1,9 @@ #!/bin/bash -echo "=============================deploy kind=============================================================" -docker version -export KIND_VERSION="0.8.1" -curl -Lo ./kind https://kind.sigs.k8s.io/dl/v$KIND_VERSION/kind-$(uname)-amd64 -chmod +x ./kind -mv ./kind /usr/local/bin/kind -kind get clusters #see the list of kind clusters -kind get clusters -kubectl config get-contexts #kind is prefixed to the context and cluster names, for example: kind-istio-testing +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace + echo "=============================kubesec=============================================================" #https://github.com/controlplaneio/kubesec go get -u github.com/controlplaneio/kubesec/cmd/kubesec diff --git a/app/deploy-openfaas.sh b/app/deploy-openfaas.sh index e8e98d4..b8b0664 100644 --- a/app/deploy-openfaas.sh +++ b/app/deploy-openfaas.sh @@ -3,8 +3,11 @@ set -eox pipefail #safety for script # https://itnext.io/deploy-your-first-serverless-function-to-kubernetes-232307f7b0a9 echo "============================OpenFaaS ==============================================================" -`curl -sSLf https://cli.openfaas.com | sh` #install the OpenFaaS CLI -`curl -sSLf https://dl.get-arkade.dev | sh` #install arkade +# `curl -sSLf https://cli.openfaas.com | sh` #install the OpenFaaS CLI +# `curl -sSLf https://dl.get-arkade.dev | sh` #install arkade + +/bin/sh -c 'curl -sSLf https://cli.openfaas.com | sh' ##install the OpenFaaS CLI +/bin/sh -c 'curl -sSLf https://dl.get-arkade.dev | sh' ##install arkade arkade install openfaas #use arkade to install OpenFaaS arkade info openfaas From 4a8a097715b581cee2457dc5036f151abf1b2ed5 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 13:42:36 +0300 Subject: [PATCH 05/32] 
=?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D0=BE=D0=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/deploy-kubesec.sh | 11 ++++ app/deploy-openfaas.sh | 116 ++--------------------------------------- 2 files changed, 14 insertions(+), 113 deletions(-) diff --git a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh index 372b1cc..062764b 100644 --- a/app/deploy-kubesec.sh +++ b/app/deploy-kubesec.sh @@ -4,6 +4,17 @@ set -o pipefail set -o nounset set -o xtrace +echo "=============================install go=============================================================" +export GOVERSION="1.14.4" +curl -O https://dl.google.com/go/go$GOVERSION.linux-amd64.tar.gz +tar -xvf go$GOVERSION.linux-amd64.tar.gz +sudo mv go /usr/local +mkdir ~/work +echo "export GOPATH=$HOME/work" >> ~/.profile +echo "export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin" >> ~/.profile +source ~/.profile +go version + echo "=============================kubesec=============================================================" #https://github.com/controlplaneio/kubesec go get -u github.com/controlplaneio/kubesec/cmd/kubesec diff --git a/app/deploy-openfaas.sh b/app/deploy-openfaas.sh index b8b0664..265272d 100644 --- a/app/deploy-openfaas.sh +++ b/app/deploy-openfaas.sh @@ -18,7 +18,8 @@ kubectl port-forward -n openfaas svc/gateway 8080:8080 & # Now log in using the CLI PASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode; echo) -echo -n $PASSWORD | faas-cli login --username admin --password-stdin +# interactive shell +# echo -n $PASSWORD | faas-cli login --username admin --password-stdin faas-cli store deploy nodeinfo # Check to see "Ready" status @@ -28,121 +29,10 @@ echo | faas-cli invoke nodeinfo --async # curl http://localhost:8080 # Get the password so you can open the UI -echo $PASSWORD +# echo $PASSWORD # faas-cli template store list # Sign up for a Docker Hub account, so that you can store your functions for free. # export OPENFAAS_PREFIX="DOCKER_HUB_USERNAME" export OPENFAAS_PREFIX=$DOCKER_USERNAME #travisci env var faas-cli new --lang python3 serverless - - - - # - echo "=============================Inspection=============================================================" - # - kubectl get pod -o wide #The IP column will contain the internal cluster IP address for each pod. - # - kubectl get service --all-namespaces # find a Service IP,list all services in all namespaces - # - echo "=============================openEBS=============================================================" - # - pushd $(pwd) && cd app - # - sudo kubectl apply -f https://openebs.github.io/charts/openebs-operator.yaml #install OpenEBS - # - kubectl get service --all-namespaces # find a Service IP,list all services in all namespaces - # - kubectl get pods -n openebs -l openebs.io/component-name=openebs-localpv-provisioner #Observe localhost provisioner pod - # - kubectl get sc #Check the storage Class - # # openesb component list - # #https://github.com/openebs/openebs/blob/master/k8s/openebs-operator.yaml - # - | - # echo "Waiting for openebs-localpv-provisioner component to be ready ..." - # for i in {1..60}; do # Timeout after 5 minutes, 150x5=300 secs - # if sudo kubectl get pods --namespace=openebs -l openebs.io/component-name=openebs-localpv-provisioner | grep Running ; then - # break - # fi - # sleep 5 - # done - # - | - # echo "Waiting for maya-apiserver component to be ready ..." 
- # for i in {1..60}; do # Timeout after 5 minutes, 150x5=300 secs - # if sudo kubectl get pods --namespace=openebs -l openebs.io/component-name=maya-apiserver | grep Running ; then - # break - # fi - # sleep 5 - # done - # - | - # echo "Waiting for openebs-ndm-operator component to be ready ..." - # for i in {1..60}; do # Timeout after 5 minutes, 150x5=300 secs - # if sudo kubectl get pods --namespace=openebs -l openebs.io/component-name=openebs-ndm-operator | grep Running ; then - # break - # fi - # sleep 5 - # done - # - | - # echo "Waiting for openebs to be ready ..." - # for i in {1..60}; do # Timeout after 2 minutes, 60x2=300 secs - # if sudo kubectl get pods --namespace=openebs | grep Running ; then - # break - # fi - # sleep 2 - # done - # - sudo kubectl get pods --all-namespaces - # - sudo kubectl get pods --namespace=openebs - # - popd - # - echo "=============================openEBS=============================================================" - # #Create a PVC #Create an Nginx Pod which consumes OpenEBS Local PV Hospath Storage #openEBS - # #HPA - # # - pushd $(pwd) && cd hpa - # # - minikube addons list - # # - sudo minikube addons enable metrics-server - # # - minikube addons list - # # - sudo kubectl get pods -n kube-system - # # - sudo kubectl logs -n kube-system deploy/metrics-server - # # - sudo kubectl get svc -n kube-system - # # # - sudo ping 10.96.56.228 -c 1 # ping metrics-server ClusterIP - # # - kubectl get pods -n kube-system -o wide - # # - ping 10.224.13.23 -c 1 # # ping metrics-server pod IP - # # - sudo systemctl status kube-apiserver -l - # # - sudo kubectl top node - # # - sudo kubectl top pod - # # - sudo kubectl describe hpa - # # - | - # # sudo minikube start \ - # # --extra-config=controller-manager.horizontal-pod-autoscaler-upscale-delay=1m \ - # # --extra-config=controller-manager.horizontal-pod-autoscaler-downscale-delay=1m \ - # # --extra-config=controller-manager.horizontal-pod-autoscaler-sync-period=10s \ - # # --extra-config=controller-manager.horizontal-pod-autoscaler-downscale-stabilization=1m - # # - sudo kubectl autoscale deployment php-apache --cpu-percent=50 --min=1 --max=10 - # # - sudo kubectl run --generator=run-pod/v1 -it --rm load-generator --image=busybox /bin/sh #Load generator - # # - while true; do wget -q -O- http://php-apache; done - # # - kubectl get --raw /apis/metrics.k8s.io/v1beta1 - # # - kubectl get pods -n kube-system | grep metrics-server - # # - kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes | jq '.' - # # - kubectl get --raw /apis/metrics.k8s.io/v1beta1/pods | jq '.' 
- # # - | - # # kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes \ - # # | jq '[.items [] | {nodeName: .metadata.name, nodeCpu: .usage.cpu, nodeMemory: .usage.memory}]' - # # - popd - # #HPA - # # #nfs-pv-storage - # - pushd $(pwd) && cd nfs-pv-storage - # - sudo apt install nfs-kernel-server - # - sudo mkdir -p /pv/nfs/test-volume - # - sudo chmod 777 /pv/nfs/test-volume - # - sudo systemctl status nfs-server - # - sudo systemctl stop nfs-server - # - sudo systemctl start nfs-server - # - sudo systemctl restart nfs-server - # # - sudo vi /etc/exports - # # /mnt/nfs/test-volume *(rw,sync,no_subtree_check,insecure) - # # /pv/nfs/test-volume *(rw,sync,no_subtree_check,insecure) - # # - sudo exportfs -a - # # - sudo exportfs -v - # # - sudo kubectl expose deploy nginx-deploy --port 80 --type NodePort - # # - sudo apt-get update - # # - sudo apt-get install -qqy curl - # # - curl http://localhost/ - # - popd - # # #nfs-pv-storage - # # #NGINX as a sample application - # # - sudo kubectl create deployment nginx --image=nginx - # # - sudo kubectl get deployments - # # - sudo kubectl get pods - # # - sudo kubectl get all --all-namespaces - # # #NGINX as a sample application - # From 0b34a13c173887322ac96bd660995a28dead922f Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 14:36:35 +0300 Subject: [PATCH 06/32] =?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D1=8B=D0=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 54 +++++++++++++++++++------------------ app/deploy-kubesec.sh | 2 +- app/deploy-microservices.sh | 8 +++++- app/deploy-openfaas.sh | 14 +++++----- 4 files changed, 43 insertions(+), 35 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0814a14..c85b61a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -105,31 +105,33 @@ matrix: # - deactivate - # - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --config=app/kind-config.yaml - # - sudo kubectl cluster-info --context kind-kind - # - sudo make deploy-microservices - # after_success: - # - deactivate + - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK + dist: bionic + arch: amd64 + addons: + snaps: + - name: kubectl + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + - name: helm + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + language: python + python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r requirements.txt + script: + - sudo make deploy-kind + - sudo make deploy-kind + - sudo kind create cluster --name tutorial-cluster + - sudo kubectl config use-context kind-tutorial-cluster + - sudo make deploy-microservices + - sudo kind delete cluster --name tutorial-cluster + after_success: + - deactivate # - name: "istio service mesh 
kind w snapped kubectl helm Python 3.7 on bionic" #OK # dist: bionic @@ -208,7 +210,7 @@ matrix: - sudo kind create cluster --name kubesec-testing - sudo kubectl config use-context kind-kubesec-testing - sudo make deploy-kubesec - - sudo kind delete cluster --name kubesec-testing + - sudo kind delete cluster --name kubesec-testing after_success: - deactivate diff --git a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh index 062764b..cd77614 100644 --- a/app/deploy-kubesec.sh +++ b/app/deploy-kubesec.sh @@ -12,7 +12,7 @@ sudo mv go /usr/local mkdir ~/work echo "export GOPATH=$HOME/work" >> ~/.profile echo "export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin" >> ~/.profile -source ~/.profile +# source ~/.profile go version echo "=============================kubesec=============================================================" diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh index eec5ade..8ff925f 100644 --- a/app/deploy-microservices.sh +++ b/app/deploy-microservices.sh @@ -19,7 +19,13 @@ kind get clusters # - sudo snap install kubectl --classic kubectl config get-contexts #list the local Kubernetes contexts kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl + + +# Setup a Kubernetes Cluster + +# Create an environment variable to store the name of a namespace export NAMESPACE=tutorial +# Create the namespace kubectl create namespace $NAMESPACE #https://istio.io/latest/docs/setup/getting-started/ @@ -106,5 +112,5 @@ export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT #Set GATEWAY_URL #Enable Envoy’s access logging. #https://istio.io/latest/docs/tasks/observability/logs/access-log/#before-you-begin -#Deploy the sleep sample app to use as a test source for sending requests. +#Deploy the sleep sample app to use as a test source for sending requests. kubectl apply -f samples/sleep/sleep.yaml diff --git a/app/deploy-openfaas.sh b/app/deploy-openfaas.sh index 265272d..d55faad 100644 --- a/app/deploy-openfaas.sh +++ b/app/deploy-openfaas.sh @@ -17,15 +17,15 @@ kubectl rollout status -n openfaas deploy/gateway kubectl port-forward -n openfaas svc/gateway 8080:8080 & # Now log in using the CLI -PASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode; echo) +# PASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode; echo) # interactive shell # echo -n $PASSWORD | faas-cli login --username admin --password-stdin -faas-cli store deploy nodeinfo +# faas-cli store deploy nodeinfo # Check to see "Ready" status -faas-cli describe nodeinfo# Invoke -echo | faas-cli invoke nodeinfo -echo | faas-cli invoke nodeinfo --async +# faas-cli describe nodeinfo# Invoke +# echo | faas-cli invoke nodeinfo +# echo | faas-cli invoke nodeinfo --async # curl http://localhost:8080 # Get the password so you can open the UI @@ -34,5 +34,5 @@ echo | faas-cli invoke nodeinfo --async # faas-cli template store list # Sign up for a Docker Hub account, so that you can store your functions for free. 
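# Once the function scaffold exists, the usual follow-up is a single
# build/push/deploy round trip (a sketch; assumes faas-cli is already logged in to
# the registry and the gateway, and that the scaffold below produces serverless.yml):
# faas-cli up -f serverless.yml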
# export OPENFAAS_PREFIX="DOCKER_HUB_USERNAME" -export OPENFAAS_PREFIX=$DOCKER_USERNAME #travisci env var -faas-cli new --lang python3 serverless +# export OPENFAAS_PREFIX=$DOCKER_USERNAME #travisci env var +# faas-cli new --lang python3 serverless From b7cd63a8f74080ffc8c4aadbb06cf971af94178e Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 14:48:21 +0300 Subject: [PATCH 07/32] =?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D1=8B=D0=B5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/deploy-kubesec.sh | 5 +- app/deploy-microservices.sh | 175 +++++++++++++++++++++--------------- 2 files changed, 106 insertions(+), 74 deletions(-) diff --git a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh index cd77614..0785b52 100644 --- a/app/deploy-kubesec.sh +++ b/app/deploy-kubesec.sh @@ -7,12 +7,13 @@ set -o xtrace echo "=============================install go=============================================================" export GOVERSION="1.14.4" curl -O https://dl.google.com/go/go$GOVERSION.linux-amd64.tar.gz -tar -xvf go$GOVERSION.linux-amd64.tar.gz +tar -vf go$GOVERSION.linux-amd64.tar.gz sudo mv go /usr/local +stat /usr/local/go mkdir ~/work echo "export GOPATH=$HOME/work" >> ~/.profile echo "export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin" >> ~/.profile -# source ~/.profile +source ~/.profile go version echo "=============================kubesec=============================================================" diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh index 8ff925f..e92a836 100644 --- a/app/deploy-microservices.sh +++ b/app/deploy-microservices.sh @@ -29,6 +29,8 @@ export NAMESPACE=tutorial kubectl create namespace $NAMESPACE #https://istio.io/latest/docs/setup/getting-started/ +# https://istio.io/latest/docs/examples/microservices-istio/setup-kubernetes-cluster/ +# Install Istio using the demo profile. echo "===============================Install istio===========================================================" #Download Istio #/bin/sh -c 'curl -L https://istio.io/downloadIstio | sh -' #download and extract the latest release automatically (Linux or macOS) @@ -41,76 +43,105 @@ export PATH=$PWD/bin:$PATH #Add the istioctl client to your path, The istioctl c #precheck inspects a Kubernetes cluster for Istio install requirements istioctl experimental precheck #https://istio.io/docs/reference/commands/istioctl/#istioctl-experimental-precheck istioctl version -istioctl manifest apply --set profile=demo #Install Istio, use the demo configuration profile -kubectl label namespace default istio-injection=enabled #Add a namespace label to instruct Istio to automatically inject Envoy sidecar proxies when you deploy your application later - -#Deploy the sample application -kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml #Deploy the Bookinfo sample application: -kubectl get service --all-namespaces #list all services in all namespace -kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it. 
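# (When sidecar injection is enabled, each application pod should report 2/2 ready
#  containers -- a quick check, sketched assuming the istio-injection label set above:
#  kubectl get pods -o custom-columns=NAME:.metadata.name,CONTAINERS:.spec.containers[*].name)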
-kubectl get pods -for i in {1..60}; do # Timeout after 5 minutes, 60x2=120 secs, 2 mins - if kubectl get pods --namespace=istio-system |grep Running ; then - break - fi - sleep 2 -done -kubectl get service --all-namespaces #list all services in all namespace - -# see if the app is running inside the cluster and serving HTML pages by checking for the page title in the response -#error: unable to upgrade connection: container not found ("ratings") -#kubectl exec $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*" -#interactive shell -#kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*" -# - | -# kubectl exec -it $(kubectl get pod \ -# -l app=ratings \ -# -o jsonpath='{.items[0].metadata.name}') \ -# -c ratings \ -# -- curl productpage:9080/productpage | grep -o ".*" Simple Bookstore App - - -#Open the application to outside traffic -#The Bookinfo application is deployed but not accessible from the outside. To make it accessible, you need to create an Istio Ingress Gateway, which maps a path to a route at the edge of your mesh. -kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml #Associate this application with the Istio gateway -istioctl analyze #Ensure that there are no issues with the configuration - -#Other platforms -#Determining the ingress IP and ports -#If the EXTERNAL-IP value is set, your environment has an external load balancer that you can use for the ingress gateway. -#If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway. -#access the gateway using the service’s node port. -kubectl get svc istio-ingressgateway -n istio-system #determine if your Kubernetes cluster is running in an environment that supports external load balancers - -#external load balancer -# #Follow these instructions if you have determined that your environment has an external load balancer. -# # If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway,access the gateway using the service’s node port. -# - export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}') -# - export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}') -# - export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}') - -# #In certain environments, the load balancer may be exposed using a host name, instead of an IP address. 
-# #the ingress gateway’s EXTERNAL-IP value will not be an IP address, but rather a host name - -#failed to set the INGRESS_HOST environment variable, correct the INGRESS_HOST value - export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') - -#Follow these instructions if your environment does not have an external load balancer and choose a node port instead -#Set the ingress ports: -export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}') #Set the ingress ports -export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}') #Set the ingress ports - -#INGRESS_HOST: unbound variable -export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT #Set GATEWAY_URL -# echo $GATEWAY_URL #Ensure an IP address and port were successfully assigned to the environment variable -# echo http://$GATEWAY_URL/productpage #Verify external access,retrieve the external address of the Bookinfo application -# echo $(curl http://$GATEWAY_URL/productpage) - -#View the dashboard -#istioctl dashboard kiali #optional dashboards installed by the demo installation,Access the Kiali dashboard. The default user name is admin and default password is admin -#istioctl dashboard kiali # interactive shell - -#Enable Envoy’s access logging. -#https://istio.io/latest/docs/tasks/observability/logs/access-log/#before-you-begin -#Deploy the sleep sample app to use as a test source for sending requests. +#Install Istio, use the demo configuration profile +istioctl manifest apply --set profile=demo + +#Add a namespace label to instruct Istio to automatically inject Envoy sidecar proxies when you deploy your application later +kubectl label namespace default istio-injection=enabled + +# #Deploy the sample application +# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml #Deploy the Bookinfo sample application: +# kubectl get service --all-namespaces #list all services in all namespace +# kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it. +# kubectl get pods +# for i in {1..60}; do # Timeout after 5 minutes, 60x2=120 secs, 2 mins +# if kubectl get pods --namespace=istio-system |grep Running ; then +# break +# fi +# sleep 2 +# done +# kubectl get service --all-namespaces #list all services in all namespace +# +# # see if the app is running inside the cluster and serving HTML pages by checking for the page title in the response +# #error: unable to upgrade connection: container not found ("ratings") +# #kubectl exec $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*" +# #interactive shell +# #kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*" +# # - | +# # kubectl exec -it $(kubectl get pod \ +# # -l app=ratings \ +# # -o jsonpath='{.items[0].metadata.name}') \ +# # -c ratings \ +# # -- curl productpage:9080/productpage | grep -o ".*" Simple Bookstore App +# +# +# #Open the application to outside traffic +# #The Bookinfo application is deployed but not accessible from the outside. To make it accessible, you need to create an Istio Ingress Gateway, which maps a path to a route at the edge of your mesh. 
+# kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml #Associate this application with the Istio gateway +# istioctl analyze #Ensure that there are no issues with the configuration +# +# #Other platforms +# #Determining the ingress IP and ports +# #If the EXTERNAL-IP value is set, your environment has an external load balancer that you can use for the ingress gateway. +# #If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway. +# #access the gateway using the service’s node port. +# kubectl get svc istio-ingressgateway -n istio-system #determine if your Kubernetes cluster is running in an environment that supports external load balancers +# +# #external load balancer +# # #Follow these instructions if you have determined that your environment has an external load balancer. +# # # If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway,access the gateway using the service’s node port. +# # - export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}') +# # - export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}') +# # - export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}') +# +# # #In certain environments, the load balancer may be exposed using a host name, instead of an IP address. +# # #the ingress gateway’s EXTERNAL-IP value will not be an IP address, but rather a host name +# +# #failed to set the INGRESS_HOST environment variable, correct the INGRESS_HOST value +# export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}') +# +# #Follow these instructions if your environment does not have an external load balancer and choose a node port instead +# #Set the ingress ports: +# export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}') #Set the ingress ports +# export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}') #Set the ingress ports +# +# #INGRESS_HOST: unbound variable +# export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT #Set GATEWAY_URL +# # echo $GATEWAY_URL #Ensure an IP address and port were successfully assigned to the environment variable +# # echo http://$GATEWAY_URL/productpage #Verify external access,retrieve the external address of the Bookinfo application +# # echo $(curl http://$GATEWAY_URL/productpage) +# +# #View the dashboard +# #istioctl dashboard kiali #optional dashboards installed by the demo installation,Access the Kiali dashboard. The default user name is admin and default password is admin +# #istioctl dashboard kiali # interactive shell +# +# #Enable Envoy’s access logging. +# #https://istio.io/latest/docs/tasks/observability/logs/access-log/#before-you-begin +# #Deploy the sleep sample app to use as a test source for sending requests. 
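# (Once the sleep pod below is ready, it can drive test traffic from inside the
#  mesh -- a sketch, assuming the httpbin service this script deploys afterwards:
#  kubectl exec deploy/sleep -c sleep -- curl -sS httpbin:8000/headers)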
+# kubectl apply -f samples/sleep/sleep.yaml + + +# enable Envoy’s access logging +# Skip the clean up and delete steps, because you need the sleep application +# https://istio.io/latest/docs/examples/microservices-istio/setup-kubernetes-cluster/ +# https://istio.io/latest/docs/tasks/observability/logs/access-log/#before-you-begin +echo "===============================Enable Envoy’s access logging.===========================================================" +# Deploy the sleep sample app to use as a test source for sending requests. +# If you have automatic sidecar injection enabled, run the following command to deploy the sample app + +# Otherwise, manually inject the sidecar before deploying the sleep application +# kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml) + kubectl apply -f samples/sleep/sleep.yaml +# kubectl apply -f https://github.com/istio/istio/tree/release-1.6/samples/sleep + +# Set the SOURCE_POD environment variable to the name of your source pod: +export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name}) + +# Start the httpbin sample. +# If you have enabled automatic sidecar injection, deploy the httpbin service + +# Otherwise, you have to manually inject the sidecar before deploying the httpbin application +# kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) + +kubectl apply -f samples/httpbin/httpbin.yaml From cfa2d5a938b7ea9b044119ec96012f040754186f Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 16:52:54 +0300 Subject: [PATCH 08/32] =?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D1=8B=D0=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index c85b61a..cf81070 100644 --- a/.travis.yml +++ b/.travis.yml @@ -198,8 +198,7 @@ matrix: - name: helm confinement: classic # or devmode channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 + language: go before_install: - pip3 install virtualenv - virtualenv -p $(which python3) ~venvpy3 From f8d65fd1d7fcdb883988b20c86ac0b726fb7e067 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 16:53:23 +0300 Subject: [PATCH 09/32] =?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D1=8B=D1=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- app/deploy-kubesec.sh | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh index 0785b52..3be5dbe 100644 --- a/app/deploy-kubesec.sh +++ b/app/deploy-kubesec.sh @@ -4,17 +4,17 @@ set -o pipefail set -o nounset set -o xtrace -echo "=============================install go=============================================================" -export GOVERSION="1.14.4" -curl -O https://dl.google.com/go/go$GOVERSION.linux-amd64.tar.gz -tar -vf go$GOVERSION.linux-amd64.tar.gz -sudo mv go /usr/local -stat /usr/local/go -mkdir ~/work -echo "export GOPATH=$HOME/work" >> ~/.profile -echo "export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin" >> ~/.profile -source ~/.profile -go version +# echo "=============================install go=============================================================" +# export GOVERSION="1.14.4" +# curl -O https://dl.google.com/go/go$GOVERSION.linux-amd64.tar.gz +# tar -vf go$GOVERSION.linux-amd64.tar.gz +# sudo mv go /usr/local +# stat 
/usr/local/go +# mkdir ~/work +# echo "export GOPATH=$HOME/work" >> ~/.profile +# echo "export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin" >> ~/.profile +# source ~/.profile +# go version echo "=============================kubesec=============================================================" #https://github.com/controlplaneio/kubesec From 157849cce2e257a83ee786f924aa6c0c2a178f31 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 17:00:58 +0300 Subject: [PATCH 10/32] python removed --- .travis.yml | 10 +++++----- app/deploy-microservices.sh | 13 +++++++++++++ 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index cf81070..68311b0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -199,11 +199,11 @@ matrix: confinement: classic # or devmode channel: latest/stable # will be passed to --channel flag language: go - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt script: - sudo make deploy-kind - sudo kind create cluster --name kubesec-testing diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh index e92a836..54e5c9d 100644 --- a/app/deploy-microservices.sh +++ b/app/deploy-microservices.sh @@ -145,3 +145,16 @@ export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.n # kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml) kubectl apply -f samples/httpbin/httpbin.yaml + + +# Enable Envoy’s access logging +# Install Istio using the demo profile. +# replace demo with the name of the profile you used when you installed Istio +istioctl install --set profile=demo --set meshConfig.accessLogFile="/dev/stdout" + +# Test the access log +kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -v httpbin:8000/status/418 +# Check sleep’s log +kubectl logs -l app=sleep -c istio-proxy +# Check httpbin’s log +kubectl logs -l app=httpbin -c istio-proxy From 61917ca5a6202e9f3eed26836d52fe07dc1d4308 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 17:25:58 +0300 Subject: [PATCH 11/32] typos --- app/deploy-microservices.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh index 54e5c9d..4734bf7 100644 --- a/app/deploy-microservices.sh +++ b/app/deploy-microservices.sh @@ -153,7 +153,9 @@ kubectl apply -f samples/httpbin/httpbin.yaml istioctl install --set profile=demo --set meshConfig.accessLogFile="/dev/stdout" # Test the access log -kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -v httpbin:8000/status/418 +# connect to 10.110.95.100 port 8000 failed: Connection refused +# kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -v httpbin:8000/status/418 + # Check sleep’s log kubectl logs -l app=sleep -c istio-proxy # Check httpbin’s log From 31b64ade75aac48100f9496162ed2166d77a09e5 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 17:28:46 +0300 Subject: [PATCH 12/32] =?UTF-8?q?=D1=83=D0=B2`=D0=B5=D1=80=D0=B5=D0=BD?= =?UTF-8?q?=D0=BD=D1=8B=D1=85?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 58 
+++++++++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 28 deletions(-) diff --git a/.travis.yml b/.travis.yml index 68311b0..00450c7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -160,32 +160,33 @@ matrix: # after_success: # - deactivate - - name: "openfaas w snapped kubectl helm Python 3.7 on bionic" #OK - dist: bionic - arch: amd64 - addons: - snaps: - - name: kubectl - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - - name: helm - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt - script: - - sudo make deploy-kind - - sudo kind create cluster --name openfaas-testing - - sudo kubectl config use-context kind-openfaas-testing - - sudo make deploy-openfaas - - sudo kind delete cluster --name openfaas-testing - after_success: - - deactivate + #MOVED + # - name: "openfaas w snapped kubectl helm Python 3.7 on bionic" #OK + # dist: bionic + # arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # language: python + # python: 3.7 + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name openfaas-testing + # - sudo kubectl config use-context kind-openfaas-testing + # - sudo make deploy-openfaas + # - sudo kind delete cluster --name openfaas-testing + # after_success: + # - deactivate - name: "kubesec w snapped kubectl helm Python 3.7 on bionic" #OK dist: bionic @@ -198,7 +199,7 @@ matrix: - name: helm confinement: classic # or devmode channel: latest/stable # will be passed to --channel flag - language: go + language: go # before_install: # - pip3 install virtualenv # - virtualenv -p $(which python3) ~venvpy3 @@ -208,7 +209,8 @@ matrix: - sudo make deploy-kind - sudo kind create cluster --name kubesec-testing - sudo kubectl config use-context kind-kubesec-testing - - sudo make deploy-kubesec + - go version + # - sudo make deploy-kubesec - sudo kind delete cluster --name kubesec-testing after_success: - deactivate From 985ceff3b7156cc5c7ce6b74ef07636dc5588c88 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 8 Jul 2020 17:46:47 +0300 Subject: [PATCH 13/32] =?UTF-8?q?=D0=B1=D1=8B=D1=82=D1=8C=20=D1=83=D0=B2?= =?UTF-8?q?=D0=B5=CC=81=D1=80=D0=B5=D0=BD=20=D0=B2=20=D1=87=D1=91=D0=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 3 ++- app/deploy-kubesec.sh | 2 +- app/deploy-microservices.sh | 11 +++++++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 00450c7..30347ab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -210,7 +210,8 @@ matrix: - sudo kind create cluster --name kubesec-testing - sudo kubectl config use-context kind-kubesec-testing - go version - # - sudo make deploy-kubesec + - go get -u github.com/controlplaneio/kubesec/cmd/kubesec + - sudo make deploy-kubesec - sudo kind delete cluster --name kubesec-testing after_success: - deactivate diff --git 
a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh
index 3be5dbe..8ae87bb 100644
--- a/app/deploy-kubesec.sh
+++ b/app/deploy-kubesec.sh
@@ -18,7 +18,7 @@ set -o xtrace
 echo "=============================kubesec============================================================="
 #https://github.com/controlplaneio/kubesec
-go get -u github.com/controlplaneio/kubesec/cmd/kubesec
+# go get -u github.com/controlplaneio/kubesec/cmd/kubesec

 #Command line usage
 cat <<EOF > kubesec-test.yaml
diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh
index 4734bf7..93e2a00 100644
--- a/app/deploy-microservices.sh
+++ b/app/deploy-microservices.sh
@@ -156,6 +156,17 @@ istioctl install --set profile=demo --set meshConfig.accessLogFile="/dev/stdout"
 # connect to 10.110.95.100 port 8000 failed: Connection refused
 # kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -v httpbin:8000/status/418

+kubectl get pods --all-namespaces
+echo "Waiting for kubernetes to be ready ..."
+for i in {1..150}; do # Timeout after 25 minutes, 150x10=1500 secs
+  if kubectl get pods --namespace=istio-system | grep ContainerCreating ; then
+    sleep 10
+  else
+    break
+  fi
+done
+kubectl get pods --all-namespaces
+
 # Check sleep's log
 kubectl logs -l app=sleep -c istio-proxy
 # Check httpbin's log

From 276682535b7fa07ed084ff5136f536db6c220577 Mon Sep 17 00:00:00 2001
From: githubfoam
Date: Wed, 8 Jul 2020 21:08:41 +0300
Subject: [PATCH 14/32] =?UTF-8?q?=D0=BB`=D1=8E=D0=B4=D0=B8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app/deploy-microservices.sh | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh
index 93e2a00..67645e5 100644
--- a/app/deploy-microservices.sh
+++ b/app/deploy-microservices.sh
@@ -157,6 +157,14 @@ istioctl install --set profile=demo --set meshConfig.accessLogFile="/dev/stdout"
 # kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -v httpbin:8000/status/418
 kubectl get pods --all-namespaces
+echo "Waiting for sleep and httpbin to be ready ..."
+for i in {1..150}; do # Timeout after 25 minutes, 150x10=1500 secs
+  if kubectl get pods --namespace=default | grep PodInitializing ; then
+    sleep 10
+  else
+    break
+  fi
+done
 echo "Waiting for kubernetes to be ready ..."
for i in {1..150}; do # Timeout after 25 minutes, 150x10=1500 secs
   if kubectl get pods --namespace=istio-system | grep ContainerCreating ; then

From e2fc98708a48e626f5903f830fd706c5f97c5257 Mon Sep 17 00:00:00 2001
From: githubfoam
Date: Thu, 9 Jul 2020 11:46:10 +0300
Subject: [PATCH 15/32] Kubernetes Ingress resource

---
 app/deploy-microservices.sh | 48 ++++++++++++++++++++++++++++++-
 1 file changed, 47 insertions(+), 1 deletion(-)

diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh
index 67645e5..2532eb6 100644
--- a/app/deploy-microservices.sh
+++ b/app/deploy-microservices.sh
@@ -178,4 +178,50 @@ kubectl get pods --all-namespaces
 # Check sleep's log
 kubectl logs -l app=sleep -c istio-proxy
 # Check httpbin's log
-kubectl logs -l app=httpbin -c istio-proxy
+kubectl logs -l app=httpbin -c istio-proxy #2020-07-08T18:15:02.910663Z info Envoy proxy is ready
+
+
+# https://istio.io/latest/docs/examples/microservices-istio/setup-kubernetes-cluster/
+# Create a Kubernetes Ingress resource for these common Istio services using the kubectl
+# Grafana
+# Jaeger
+# Prometheus
+# Kiali
+# The kubectl command can accept an in-line configuration to create the Ingress resources for each service
+kubectl apply -f - <<EOF
+[... the in-line Ingress definitions for Grafana, Jaeger, Prometheus and Kiali are missing from this copy; the block closed with the Kiali rule and EOF, visible as context in the next patch ...]

From: githubfoam
Date: Thu, 9 Jul 2020 13:35:52 +0300
Subject: [PATCH 16/32] =?UTF-8?q?=D0=BB`=D1=8E=D0=B4=D0=B8?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app/deploy-microservices.sh | 188 ++++++++++++++++++++++++++++++++++++
 tutorial-user-config.yaml | 27 ++++++
 2 files changed, 215 insertions(+)
 create mode 100644 tutorial-user-config.yaml

diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh
index 2532eb6..46b06b4 100644
--- a/app/deploy-microservices.sh
+++ b/app/deploy-microservices.sh
@@ -225,3 +225,191 @@ spec:
           serviceName: kiali
           servicePort: 20001
 EOF
+
+
+# Create a role to provide read access to the istio-system namespace.
+# This role is required to limit permissions of the participants
+kubectl apply -f - <<EOF
+[... the in-line Role definition is missing from this copy ...]
+
+
+
+# Run Bookinfo with Kubernetes
+# how to deploy the whole application to a Kubernetes cluster.
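+# (Two notes on the steps below: the -l version!=v2,version!=v3 selector makes kubectl
+# apply only the v1 Deployment of each microservice, and MYHOST just prefixes the current
+# context's namespace to bookinfo.com - with a namespace of, say, tutorial, it would
+# evaluate to tutorial.bookinfo.com; the namespace name here is illustrative.)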
+# https://istio.io/latest/docs/examples/microservices-istio/bookinfo-kubernetes/ + + +# Deploy the application and a testing pod +# Set the MYHOST environment variable to hold the URL of the application +export MYHOST=$(kubectl config view -o jsonpath={.contexts..namespace}).bookinfo.com + +# Deploy the application to your Kubernetes cluster +kubectl apply -l version!=v2,version!=v3 -f https://raw.githubusercontent.com/istio/istio/release-1.6/samples/bookinfo/platform/kube/bookinfo.yaml + +# Check the status of the pods +kubectl get pods + + +# After the four services achieve the Running status, you can scale the deployment +# each version of each microservice run in three pods +kubectl scale deployments --all --replicas 3 + +# Check the status of the pods +kubectl get pods + +# After the services achieve the Running status, deploy a testing pod, sleep, to use for sending requests to your microservices +kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.6/samples/sleep/sleep.yaml + +# confirm that the Bookinfo application is running, send a request to it with a curl command from your testing pod: +# interactive shell +# kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl productpage:9080/productpage | grep -o ".*" + + +# Enable external access to the application diff --git a/tutorial-user-config.yaml b/tutorial-user-config.yaml new file mode 100644 index 0000000..f3aa6bd --- /dev/null +++ b/tutorial-user-config.yaml @@ -0,0 +1,27 @@ +cat < ./${NAMESPACE}-user-config.yaml +apiVersion: v1 +kind: Config +preferences: {} + +clusters: +- cluster: + certificate-authority-data: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath='{.data.ca\.crt}') + server: $(kubectl config view -o jsonpath="{.clusters[?(.name==\"$(kubectl config view -o jsonpath="{.contexts[?(.name==\"$(kubectl config current-context)\")].context.cluster}")\")].cluster.server}") + name: ${NAMESPACE}-cluster + +users: +- name: ${NAMESPACE}-user + user: + as-user-extra: {} + client-key-data: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath='{.data.ca\.crt}') + token: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath={.data.token} | base64 --decode) + +contexts: +- context: + cluster: ${NAMESPACE}-cluster + namespace: ${NAMESPACE} + user: ${NAMESPACE}-user + name: ${NAMESPACE} + +current-context: ${NAMESPACE} +EOF From 64a7c1bc2606034aad597005634db77e3417c63f Mon Sep 17 00:00:00 2001 From: githubfoam Date: Thu, 9 Jul 2020 13:45:35 +0300 Subject: [PATCH 17/32] typo --- app/deploy-microservices.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh index 46b06b4..57554cf 100644 --- a/app/deploy-microservices.sh +++ b/app/deploy-microservices.sh @@ -313,8 +313,8 @@ EOF echo $NAMESPACE # Set the KUBECONFIG environment variable for the ${NAMESPACE}-user-config.yaml configuration file -export KUBECONFIG=./${NAMESPACE}-user-config.yaml - +# export KUBECONFIG=./${NAMESPACE}-user-config.yaml +export KUBECONFIG=${NAMESPACE}-user-config.yaml # Verify that the configuration took effect by printing the current namespaces # see the name of your namespace in the output kubectl config view -o jsonpath="{.contexts[?(@.name==\"$(kubectl config 
current-context)\")].context.namespace}" From c5be5ff74e350063682614fea187873532a8f332 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Thu, 9 Jul 2020 14:43:36 +0300 Subject: [PATCH 18/32] typo --- app/deploy-microservices.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh index 57554cf..140cee6 100644 --- a/app/deploy-microservices.sh +++ b/app/deploy-microservices.sh @@ -314,7 +314,10 @@ echo $NAMESPACE # Set the KUBECONFIG environment variable for the ${NAMESPACE}-user-config.yaml configuration file # export KUBECONFIG=./${NAMESPACE}-user-config.yaml -export KUBECONFIG=${NAMESPACE}-user-config.yaml +ls -lai ~/ +pwd +ls -lai +export KUBECONFIG=../${NAMESPACE}-user-config.yaml # Verify that the configuration took effect by printing the current namespaces # see the name of your namespace in the output kubectl config view -o jsonpath="{.contexts[?(@.name==\"$(kubectl config current-context)\")].context.namespace}" From 1bcc86307a3bbb45ccf30566e12cecbc28894ed6 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Thu, 9 Jul 2020 15:33:56 +0300 Subject: [PATCH 19/32] dir me --- app/deploy-microservices.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh index 140cee6..6aee014 100644 --- a/app/deploy-microservices.sh +++ b/app/deploy-microservices.sh @@ -20,6 +20,7 @@ kind get clusters kubectl config get-contexts #list the local Kubernetes contexts kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl +BASEDIR=`pwd` && echo $BASEDIR # Setup a Kubernetes Cluster @@ -314,10 +315,8 @@ echo $NAMESPACE # Set the KUBECONFIG environment variable for the ${NAMESPACE}-user-config.yaml configuration file # export KUBECONFIG=./${NAMESPACE}-user-config.yaml -ls -lai ~/ -pwd -ls -lai -export KUBECONFIG=../${NAMESPACE}-user-config.yaml +export KUBECONFIG=$BASEDIR/${NAMESPACE}-user-config.yaml + # Verify that the configuration took effect by printing the current namespaces # see the name of your namespace in the output kubectl config view -o jsonpath="{.contexts[?(@.name==\"$(kubectl config current-context)\")].context.namespace}" From 811a3b01fa0e1d9af80687a3b699c579b14bbb57 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Thu, 9 Jul 2020 16:10:41 +0300 Subject: [PATCH 20/32] typo --- tutorial-user-config.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/tutorial-user-config.yaml b/tutorial-user-config.yaml index f3aa6bd..323f6a0 100644 --- a/tutorial-user-config.yaml +++ b/tutorial-user-config.yaml @@ -1,4 +1,3 @@ -cat < ./${NAMESPACE}-user-config.yaml apiVersion: v1 kind: Config preferences: {} @@ -24,4 +23,3 @@ contexts: name: ${NAMESPACE} current-context: ${NAMESPACE} -EOF From c6301527fc4f96f4860e16a1bf9015c2dea7307d Mon Sep 17 00:00:00 2001 From: githubfoam Date: Thu, 9 Jul 2020 16:34:41 +0300 Subject: [PATCH 21/32] renew --- tutorial-user-config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tutorial-user-config.yaml b/tutorial-user-config.yaml index 323f6a0..6906254 100644 --- a/tutorial-user-config.yaml +++ b/tutorial-user-config.yaml @@ -23,3 +23,4 @@ contexts: name: ${NAMESPACE} current-context: ${NAMESPACE} + From b4836ea7d289245c5cdc4d6f9e6e8e943c5ba012 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Sat, 11 Jul 2020 21:59:51 +0300 Subject: [PATCH 22/32] =?UTF-8?q?=09=D0=BD=D0=B0=D1=80`=D0=BE=D0=B4?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit --- .travis.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 30347ab..09752f6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -124,7 +124,6 @@ matrix: - source ~venvpy3/bin/activate - pip install -r requirements.txt script: - - sudo make deploy-kind - sudo make deploy-kind - sudo kind create cluster --name tutorial-cluster - sudo kubectl config use-context kind-tutorial-cluster From 7ca73e4b76888e8402fb9857ea761d2b39d2464e Mon Sep 17 00:00:00 2001 From: githubfoam Date: Sun, 2 Aug 2020 19:44:43 +0300 Subject: [PATCH 23/32] init --- .travis.yml | 56 ++++++++++++++++++ Makefile | 9 +++ app/counter.yaml | 9 +++ app/deploy-efk.sh | 19 ++++++ app/fluentd-daemonset-elasticsearch.yaml | 73 ++++++++++++++++++++++++ app/kibana-values.yaml | 10 ++++ app/provision-helm.sh | 24 ++++++++ app/provision-kubectl.sh | 11 ++++ 8 files changed, 211 insertions(+) create mode 100644 app/counter.yaml create mode 100644 app/deploy-efk.sh create mode 100644 app/fluentd-daemonset-elasticsearch.yaml create mode 100644 app/kibana-values.yaml create mode 100644 app/provision-helm.sh create mode 100644 app/provision-kubectl.sh diff --git a/.travis.yml b/.travis.yml index 09752f6..df16f51 100644 --- a/.travis.yml +++ b/.travis.yml @@ -105,6 +105,62 @@ matrix: # - deactivate + - name: "EFK kind wo snaps Python 3.7 on bionic" #OK + dist: bionic + arch: amd64 + addons: + snaps: + - name: kubectl + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + - name: helm + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + language: python + python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r requirements.txt + script: + - sudo make deploy-kind + - sudo kind create cluster --name tutorial-cluster + - sudo kubectl config use-context kind-tutorial-cluster + - sudo make provision-kubectl + - sudo make provision-helm + - sudo make deploy-efk + - sudo kind delete cluster --name tutorial-cluster + after_success: + - deactivate + + - name: "EFK kind w snapped kubectl helm Python 3.7 on bionic" #OK + dist: bionic + arch: amd64 + addons: + snaps: + - name: kubectl + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + - name: helm + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + language: python + python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r requirements.txt + script: + - sudo make deploy-kind + - sudo kind create cluster --name tutorial-cluster + - sudo kubectl config use-context kind-tutorial-cluster + - sudo make deploy-efk + - sudo kind delete cluster --name tutorial-cluster + after_success: + - deactivate + - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK dist: bionic arch: amd64 diff --git a/Makefile b/Makefile index 7a0592a..1eb696f 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,15 @@ IMAGE := alpine/fio APP:="app/deploy-openesb.sh" +deploy-efk: + bash scripts/deploy-efk.sh + +provision-helm: + bash scripts/provision-helm.sh + +provision-kubectl: + bash scripts/provision-kubectl.sh + deploy-openfaas: bash app/deploy-openfaas.sh diff --git a/app/counter.yaml b/app/counter.yaml new file mode 100644 index 0000000..cbcc8ef --- /dev/null +++ b/app/counter.yaml 
@@ -0,0 +1,9 @@ +apiVersion: v1 +kind: Pod +metadata: + name: counter +spec: + containers: + - name: count + image: busybox + args: [/bin/sh, -c, 'i=0; while true; do echo "This is demo log $i: $(date)"; i=$((i+1)); sleep 1; done'] \ No newline at end of file diff --git a/app/deploy-efk.sh b/app/deploy-efk.sh new file mode 100644 index 0000000..8a808b4 --- /dev/null +++ b/app/deploy-efk.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace +# set -eox pipefail #safety for script + +echo "=============================EFK Elastic Fluentd Kibana=============================================================" + +helm install elasticsearch stable/elasticsearch +sleep 10 + +kubectl apply -f app/fluentd-daemonset-elasticsearch.yaml + +helm install kibana stable/kibana -f app/kibana-values.yaml + +kubectl apply -f app/counter.yaml + +# curl kibana dashboard diff --git a/app/fluentd-daemonset-elasticsearch.yaml b/app/fluentd-daemonset-elasticsearch.yaml new file mode 100644 index 0000000..12873eb --- /dev/null +++ b/app/fluentd-daemonset-elasticsearch.yaml @@ -0,0 +1,73 @@ +# src: https://github.com/fluent/fluentd-kubernetes-daemonset/blob/master/fluentd-daemonset-elasticsearch.yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: fluentd + # namespace: kube-system + labels: + k8s-app: fluentd-logging + version: v1 +spec: + selector: + matchLabels: + k8s-app: fluentd-logging + version: v1 + template: + metadata: + labels: + k8s-app: fluentd-logging + version: v1 + spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + containers: + - name: fluentd + image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch + env: + - name: FLUENT_ELASTICSEARCH_HOST + value: "elasticsearch-client" + - name: FLUENT_ELASTICSEARCH_PORT + value: "9200" + - name: FLUENT_ELASTICSEARCH_SCHEME + value: "http" + # Option to configure elasticsearch plugin with self signed certs + # ================================================================ + - name: FLUENT_ELASTICSEARCH_SSL_VERIFY + value: "false" # changed by me + # Option to configure elasticsearch plugin with tls + # ================================================================ + - name: FLUENT_ELASTICSEARCH_SSL_VERSION + value: "TLSv1_2" + # X-Pack Authentication + # ===================== + - name: FLUENT_ELASTICSEARCH_USER + value: "elastic" + - name: FLUENT_ELASTICSEARCH_PASSWORD + value: "changeme" + # Logz.io Authentication + # ====================== + - name: LOGZIO_TOKEN + value: "ThisIsASuperLongToken" + - name: LOGZIO_LOGTYPE + value: "kubernetes" + resources: + limits: + memory: 200Mi + requests: + cpu: 100m + memory: 200Mi + volumeMounts: + - name: varlog + mountPath: /var/log + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + terminationGracePeriodSeconds: 30 + volumes: + - name: varlog + hostPath: + path: /var/log + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers \ No newline at end of file diff --git a/app/kibana-values.yaml b/app/kibana-values.yaml new file mode 100644 index 0000000..aaf6e48 --- /dev/null +++ b/app/kibana-values.yaml @@ -0,0 +1,10 @@ +files: + kibana.yml: + ## Default Kibana configuration from kibana-docker. 
+ server.name: kibana + server.host: "0" + ## For kibana < 6.6, use elasticsearch.url instead + elasticsearch.hosts: http://elasticsearch-client:9200 + +service: + type: LoadBalancer # ClusterIP \ No newline at end of file diff --git a/app/provision-helm.sh b/app/provision-helm.sh new file mode 100644 index 0000000..b74c84c --- /dev/null +++ b/app/provision-helm.sh @@ -0,0 +1,24 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace +# set -eox pipefail #safety for script + + +echo fetch and install helm... +HELM_ARCHIVE=helm-v2.12.1-linux-amd64.tar.gz +HELM_DIR=linux-amd64 +HELM_BIN=$HELM_DIR/helm +curl -LsO https://storage.googleapis.com/kubernetes-helm/$HELM_ARCHIVE && tar -zxvf $HELM_ARCHIVE && chmod +x $HELM_BIN && cp $HELM_BIN /usr/local/bin +rm $HELM_ARCHIVE +rm -rf $HELM_DIR + +helm version + +echo setup tiller account... +kubectl -n kube-system create sa tiller && kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller + +echo initialize tiller... +helm init --wait --skip-refresh --upgrade --service-account tiller +echo tiller initialized \ No newline at end of file diff --git a/app/provision-kubectl.sh b/app/provision-kubectl.sh new file mode 100644 index 0000000..e4f2c21 --- /dev/null +++ b/app/provision-kubectl.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace +# set -eox pipefail #safety for script + +echo fetch and install kubectl... +curl -LsO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl + +kubectl version --client \ No newline at end of file From 2dbb596482c5fb55cfcf5052f7ba43285b30eed7 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Sun, 2 Aug 2020 19:58:24 +0300 Subject: [PATCH 24/32] elk init --- .travis.yml | 61 +++++++++++++++++++++++++++++++++++++++++++++-- Makefile | 5 +++- app/deploy-elk.sh | 37 ++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 3 deletions(-) create mode 100644 app/deploy-elk.sh diff --git a/.travis.yml b/.travis.yml index df16f51..4fe8ebd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -104,8 +104,7 @@ matrix: # after_success: # - deactivate - - - name: "EFK kind wo snaps Python 3.7 on bionic" #OK + - name: "EFK kind w snapped kubectl helm Python 3.7 on bionic" #OK dist: bionic arch: amd64 addons: @@ -118,6 +117,64 @@ matrix: channel: latest/stable # will be passed to --channel flag language: python python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r requirements.txt + script: + - sudo make deploy-kind + - sudo kind create cluster --name tutorial-cluster + - sudo kubectl config use-context kind-tutorial-cluster + - sudo make deploy-elk + - sudo kind delete cluster --name tutorial-cluster + after_success: + - deactivate + + + - name: "ELK kind wo snaps Python 3.7 on bionic" #OK + dist: bionic + arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + language: python + python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r 
requirements.txt + script: + - sudo make deploy-kind + - sudo kind create cluster --name tutorial-cluster + - sudo kubectl config use-context kind-tutorial-cluster + - sudo make provision-kubectl + - sudo make provision-helm + - sudo make deploy-elk + - sudo kind delete cluster --name tutorial-cluster + after_success: + - deactivate + + + - name: "EFK kind wo snaps Python 3.7 on bionic" #OK + dist: bionic + arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + language: python + python: 3.7 before_install: - pip3 install virtualenv - virtualenv -p $(which python3) ~venvpy3 diff --git a/Makefile b/Makefile index 1eb696f..f2140f5 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,12 @@ IMAGE := alpine/fio APP:="app/deploy-openesb.sh" +deploy-elk: + bash scripts/deploy-elk.sh + deploy-efk: bash scripts/deploy-efk.sh - + provision-helm: bash scripts/provision-helm.sh diff --git a/app/deploy-elk.sh b/app/deploy-elk.sh new file mode 100644 index 0000000..13c3c31 --- /dev/null +++ b/app/deploy-elk.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace +# set -eox pipefail #safety for script + +echo "=============================ELK Elastic Kibana Logstash=============================================================" + +kubectl create namespace elk +kubectl apply --namespace=elk -f - <<"EOF" +apiVersion: v1 +kind: LimitRange +metadata: + name: mem-limit-range +spec: + limits: + - default: + memory: 2000Mi + cpu: 2000m + defaultRequest: + memory: 1000Mi + cpu: 1000m + type: Container +EOF +echo "resource quotas applied to the namespace" + +helm install --name elastic-stack --namespace=elk stable/elastic-stack -f my-elastic-stack.yaml +sleep 150 +kubectl get pods -n elk -l "release=elastic-stack" + +helm install --name kube-state-metrics --namespace=elk stable/kube-state-metrics +helm install --name elastic-metricbeat --namespace=elk stable/metricbeat -f my-elastic-metricbeat.yaml # metricbeat dashboard +kubectl --namespace=elk get pods -l "app=metricbeat,release=elastic-metricbeat" + +export POD_NAME=$(kubectl get pods -n elk -l "app=kibana,release=elastic-stack" -o jsonpath="{.items[0].metadata.name}"); +kubectl port-forward -n elk $POD_NAME 5601:5601 From 72f3045f6f74efea96ce847e04d0773906a97213 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Sun, 2 Aug 2020 20:04:43 +0300 Subject: [PATCH 25/32] reinit elk --- app/deploy-elk.sh | 9 ++++++--- app/sample-elastic-metricbeat.yaml | 30 ++++++++++++++++++++++++++++++ app/sample-elastic-stack.yaml | 23 +++++++++++++++++++++++ 3 files changed, 59 insertions(+), 3 deletions(-) create mode 100644 app/sample-elastic-metricbeat.yaml create mode 100644 app/sample-elastic-stack.yaml diff --git a/app/deploy-elk.sh b/app/deploy-elk.sh index 13c3c31..550bc87 100644 --- a/app/deploy-elk.sh +++ b/app/deploy-elk.sh @@ -25,13 +25,16 @@ spec: EOF echo "resource quotas applied to the namespace" -helm install --name elastic-stack --namespace=elk stable/elastic-stack -f my-elastic-stack.yaml -sleep 150 +helm install --name elastic-stack --namespace=elk stable/elastic-stack -f app/sample-elastic-stack.yaml +sleep 180 + kubectl get pods -n elk -l "release=elastic-stack" helm install --name kube-state-metrics --namespace=elk stable/kube-state-metrics -helm install --name elastic-metricbeat --namespace=elk 
stable/metricbeat -f my-elastic-metricbeat.yaml # metricbeat dashboard +helm install --name elastic-metricbeat --namespace=elk stable/metricbeat -f app/sample-elastic-metricbeat.yaml # metricbeat dashboard kubectl --namespace=elk get pods -l "app=metricbeat,release=elastic-metricbeat" export POD_NAME=$(kubectl get pods -n elk -l "app=kibana,release=elastic-stack" -o jsonpath="{.items[0].metadata.name}"); kubectl port-forward -n elk $POD_NAME 5601:5601 + +curl http://localhost:5601 diff --git a/app/sample-elastic-metricbeat.yaml b/app/sample-elastic-metricbeat.yaml new file mode 100644 index 0000000..22c5fd9 --- /dev/null +++ b/app/sample-elastic-metricbeat.yaml @@ -0,0 +1,30 @@ +image: + repository: docker.elastic.co/beats/metricbeat-oss +daemonset: + config: + output.file: false + output.elasticsearch: + hosts: ["elastic-stack-elasticsearch-client:9200"] + modules: + kubernetes: + config: + - module: kubernetes + metricsets: + - node + - system + - pod + - container + - volume + period: 10s + host: ${NODE_NAME} + hosts: ["https://${HOSTNAME}:10250"] + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + ssl.verification_mode: "none" +deployment: + config: + output.file: false + output.elasticsearch: + hosts: ["elastic-stack-elasticsearch-client:9200"] + setup.kibana: + host: "elastic-stack-kibana:443" + setup.dashboards.enabled: true \ No newline at end of file diff --git a/app/sample-elastic-stack.yaml b/app/sample-elastic-stack.yaml new file mode 100644 index 0000000..03502dc --- /dev/null +++ b/app/sample-elastic-stack.yaml @@ -0,0 +1,23 @@ +logstash: + enabled: true + elasticsearch: + host: elastic-stack-elasticsearch-client + +filebeat: + enabled: true + config: + input: + type: container + paths: + - /var/log/containers/*.log + processors: + - add_kubernetes_metadata: + host: ${NODE_NAME} + matchers: + - logs_path: + logs_path: "/var/log/containers/" + output.file.enabled: false + output.logstash: + hosts: ["elastic-stack-logstash:5044"] + indexTemplateLoad: + - elastic-stack-elasticsearch-client:9200 \ No newline at end of file From 059c4d39ec74a659a14fa717f1413c10f9a3d95e Mon Sep 17 00:00:00 2001 From: githubfoam Date: Sun, 2 Aug 2020 22:17:56 +0300 Subject: [PATCH 26/32] =?UTF-8?q?=D1=81=D0=BC=D0=BE=D1=82=D1=80=D0=B8?= =?UTF-8?q?=D0=BC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Makefile | 8 ++++---- app/deploy-elk.sh | 10 +++++----- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Makefile b/Makefile index f2140f5..ba3da2f 100644 --- a/Makefile +++ b/Makefile @@ -2,16 +2,16 @@ IMAGE := alpine/fio APP:="app/deploy-openesb.sh" deploy-elk: - bash scripts/deploy-elk.sh + bash app/deploy-elk.sh deploy-efk: - bash scripts/deploy-efk.sh + bash app/deploy-efk.sh provision-helm: - bash scripts/provision-helm.sh + bash app/provision-helm.sh provision-kubectl: - bash scripts/provision-kubectl.sh + bash app/provision-kubectl.sh deploy-openfaas: bash app/deploy-openfaas.sh diff --git a/app/deploy-elk.sh b/app/deploy-elk.sh index 550bc87..4bb3216 100644 --- a/app/deploy-elk.sh +++ b/app/deploy-elk.sh @@ -25,16 +25,16 @@ spec: EOF echo "resource quotas applied to the namespace" -helm install --name elastic-stack --namespace=elk stable/elastic-stack -f app/sample-elastic-stack.yaml -sleep 180 - +helm install --name elastic-stack --namespace=elk stable/elastic-stack -f my-elastic-stack.yaml +sleep 150 kubectl get pods -n elk -l "release=elastic-stack" helm install --name kube-state-metrics --namespace=elk 
stable/kube-state-metrics -helm install --name elastic-metricbeat --namespace=elk stable/metricbeat -f app/sample-elastic-metricbeat.yaml # metricbeat dashboard +helm install --name elastic-metricbeat --namespace=elk stable/metricbeat -f my-elastic-metricbeat.yaml # metricbeat dashboard kubectl --namespace=elk get pods -l "app=metricbeat,release=elastic-metricbeat" export POD_NAME=$(kubectl get pods -n elk -l "app=kibana,release=elastic-stack" -o jsonpath="{.items[0].metadata.name}"); kubectl port-forward -n elk $POD_NAME 5601:5601 -curl http://localhost:5601 +# view dashoard +curl http://localhost:5601/ \ No newline at end of file From d087ce6503e05c09255f5832c95bebdfbaf02e1e Mon Sep 17 00:00:00 2001 From: githubfoam Date: Tue, 11 Aug 2020 15:44:25 +0300 Subject: [PATCH 27/32] init --- .travis.yml | 254 +++++++++++++++++++++++----------------- Makefile | 3 + app/deploy-chaosmesh.sh | 29 +++++ 3 files changed, 176 insertions(+), 110 deletions(-) create mode 100644 app/deploy-chaosmesh.sh diff --git a/.travis.yml b/.travis.yml index 4fe8ebd..6c1ab2e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -104,7 +104,7 @@ matrix: # after_success: # - deactivate - - name: "EFK kind w snapped kubectl helm Python 3.7 on bionic" #OK + - name: "chaos mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK dist: bionic arch: amd64 addons: @@ -126,124 +126,158 @@ matrix: - sudo make deploy-kind - sudo kind create cluster --name tutorial-cluster - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-elk + - sudo make deploy-chaosmesh - sudo kind delete cluster --name tutorial-cluster after_success: - deactivate - - name: "ELK kind wo snaps Python 3.7 on bionic" #OK - dist: bionic - arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt - script: - - sudo make deploy-kind - - sudo kind create cluster --name tutorial-cluster - - sudo kubectl config use-context kind-tutorial-cluster - - sudo make provision-kubectl - - sudo make provision-helm - - sudo make deploy-elk - - sudo kind delete cluster --name tutorial-cluster - after_success: - - deactivate + + # - name: "ELK kind w snapped kubectl helm Python 3.7 on bionic" #OK + # dist: bionic + # arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # language: python + # python: 3.7 + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name tutorial-cluster + # - sudo kubectl config use-context kind-tutorial-cluster + # - sudo make deploy-elk + # - sudo kind delete cluster --name tutorial-cluster + # after_success: + # - deactivate + + + # - name: "ELK kind wo snaps Python 3.7 on bionic" #OK + # dist: bionic + # arch: amd64 + # # addons: + # # snaps: + # # - name: kubectl + # # confinement: classic # or devmode + # # channel: latest/stable 
# will be passed to --channel flag + # # - name: helm + # # confinement: classic # or devmode + # # channel: latest/stable # will be passed to --channel flag + # language: python + # python: 3.7 + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name tutorial-cluster + # - sudo kubectl config use-context kind-tutorial-cluster + # - sudo make provision-kubectl + # - sudo make provision-helm + # - sudo make deploy-elk + # - sudo kind delete cluster --name tutorial-cluster + # after_success: + # - deactivate + + + # - name: "EFK kind wo snaps Python 3.7 on bionic" #OK + # dist: bionic + # arch: amd64 + # # addons: + # # snaps: + # # - name: kubectl + # # confinement: classic # or devmode + # # channel: latest/stable # will be passed to --channel flag + # # - name: helm + # # confinement: classic # or devmode + # # channel: latest/stable # will be passed to --channel flag + # language: python + # python: 3.7 + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name tutorial-cluster + # - sudo kubectl config use-context kind-tutorial-cluster + # - sudo make provision-kubectl + # - sudo make provision-helm + # - sudo make deploy-efk + # - sudo kind delete cluster --name tutorial-cluster + # after_success: + # - deactivate + + # - name: "EFK kind w snapped kubectl helm Python 3.7 on bionic" #OK + # dist: bionic + # arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # language: python + # python: 3.7 + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name tutorial-cluster + # - sudo kubectl config use-context kind-tutorial-cluster + # - sudo make deploy-efk + # - sudo kind delete cluster --name tutorial-cluster + # after_success: + # - deactivate + + # - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK + # dist: bionic + # arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # language: python + # python: 3.7 + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name tutorial-cluster + # - sudo kubectl config use-context kind-tutorial-cluster + # - sudo make deploy-microservices + # - sudo kind delete cluster --name tutorial-cluster + # after_success: + # - deactivate + - - name: "EFK kind wo snaps Python 3.7 on bionic" #OK - dist: bionic - arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag 
- # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt - script: - - sudo make deploy-kind - - sudo kind create cluster --name tutorial-cluster - - sudo kubectl config use-context kind-tutorial-cluster - - sudo make provision-kubectl - - sudo make provision-helm - - sudo make deploy-efk - - sudo kind delete cluster --name tutorial-cluster - after_success: - - deactivate - - name: "EFK kind w snapped kubectl helm Python 3.7 on bionic" #OK - dist: bionic - arch: amd64 - addons: - snaps: - - name: kubectl - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - - name: helm - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt - script: - - sudo make deploy-kind - - sudo kind create cluster --name tutorial-cluster - - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-efk - - sudo kind delete cluster --name tutorial-cluster - after_success: - - deactivate - - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK - dist: bionic - arch: amd64 - addons: - snaps: - - name: kubectl - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - - name: helm - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt - script: - - sudo make deploy-kind - - sudo kind create cluster --name tutorial-cluster - - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-microservices - - sudo kind delete cluster --name tutorial-cluster - after_success: - - deactivate # - name: "istio service mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK # dist: bionic diff --git a/Makefile b/Makefile index ba3da2f..eb05f49 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,9 @@ IMAGE := alpine/fio APP:="app/deploy-openesb.sh" +deploy-chaosmesh: + bash app/deploy-chaosmesh.sh + deploy-elk: bash app/deploy-elk.sh diff --git a/app/deploy-chaosmesh.sh b/app/deploy-chaosmesh.sh new file mode 100644 index 0000000..2d173a6 --- /dev/null +++ b/app/deploy-chaosmesh.sh @@ -0,0 +1,29 @@ +#!/bin/bash +set -o errexit +set -o pipefail +set -o nounset +set -o xtrace +# set -eox pipefail #safety for script + +# https://chaos-mesh.org/docs/installation/get_started_on_kind/ +echo "===============================Install Chaos Mesh===========================================================" + +/bin/sh -c 'curl -sSL https://raw.githubusercontent.com/chaos-mesh/chaos-mesh/master/install.sh | bash -s -- --local kind' +# curl -sSL https://raw.githubusercontent.com/chaos-mesh/chaos-mesh/master/install.sh | bash -s -- --local kind + + +#Deploy the sample application +kubectl get service --all-namespaces #list all services in all namespace +kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it. 
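+# For reference, once the chaos-testing pods are Running a minimal experiment can be
+# applied. A hedged sketch only - PodChaos as documented for the chaos-mesh version
+# current at the time; the experiment name is illustrative:
+# kubectl apply -f - <<CHAOS_EOF
+# apiVersion: chaos-mesh.org/v1alpha1
+# kind: PodChaos
+# metadata:
+#   name: pod-kill-demo
+#   namespace: chaos-testing
+# spec:
+#   action: pod-kill
+#   mode: one
+#   selector:
+#     namespaces:
+#       - default
+#   scheduler:
+#     cron: "@every 1m"
+# CHAOS_EOF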
+kubectl get pods + +for i in {1..60}; do # Timeout after 5 minutes, 60x2=120 secs, 2 mins + if kubectl get pods --namespace=chaos-testing |grep Running ; then + break + fi + sleep 2 +done + +kubectl get service --all-namespaces #list all services in all namespace +# Verify your installation +kubectl get pod -n chaos-testing From 06b08441ac563c3fdf53dc885641b41624d6d130 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Tue, 11 Aug 2020 16:05:29 +0300 Subject: [PATCH 28/32] reinit --- .travis.yml | 83 ++++++++++++++++++++++++++--------------------------- 1 file changed, 41 insertions(+), 42 deletions(-) diff --git a/.travis.yml b/.travis.yml index 6c1ab2e..f4cfbbb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -104,34 +104,6 @@ matrix: # after_success: # - deactivate - - name: "chaos mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK - dist: bionic - arch: amd64 - addons: - snaps: - - name: kubectl - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - - name: helm - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt - script: - - sudo make deploy-kind - - sudo kind create cluster --name tutorial-cluster - - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-chaosmesh - - sudo kind delete cluster --name tutorial-cluster - after_success: - - deactivate - - # - name: "ELK kind w snapped kubectl helm Python 3.7 on bionic" #OK # dist: bionic @@ -334,7 +306,36 @@ matrix: # after_success: # - deactivate - - name: "kubesec w snapped kubectl helm Python 3.7 on bionic" #OK + # - name: "kubesec w snapped kubectl helm Python 3.7 on bionic" #OK + # dist: bionic + # arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # language: go + # # before_install: + # # - pip3 install virtualenv + # # - virtualenv -p $(which python3) ~venvpy3 + # # - source ~venvpy3/bin/activate + # # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name kubesec-testing + # - sudo kubectl config use-context kind-kubesec-testing + # - go version + # - go get -u github.com/controlplaneio/kubesec/cmd/kubesec + # - sudo make deploy-kubesec + # - sudo kind delete cluster --name kubesec-testing + # after_success: + # - deactivate + +# ######################## TESTED OK STARTS ################################# + - name: "chaos mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK dist: bionic arch: amd64 addons: @@ -345,24 +346,22 @@ matrix: - name: helm confinement: classic # or devmode channel: latest/stable # will be passed to --channel flag - language: go - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt + language: python + python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r requirements.txt script: - sudo make deploy-kind - - sudo kind create cluster --name kubesec-testing - - sudo kubectl config use-context kind-kubesec-testing - - 
go version - - go get -u github.com/controlplaneio/kubesec/cmd/kubesec - - sudo make deploy-kubesec - - sudo kind delete cluster --name kubesec-testing + - sudo kind create cluster --name tutorial-cluster + - sudo kubectl config use-context kind-tutorial-cluster + - sudo make deploy-chaosmesh + - sudo kind delete cluster --name tutorial-cluster after_success: - deactivate -# ######################## OK ################################# # # #https://docs.cilium.io/en/latest/gettingstarted/kind/ # - name: "cilium hubble local kind w snapped kubectl helm Python 3.7 on bionic" #OK From 7a7a24ea395b2de9c2bbf9141e26a51c91389f69 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Tue, 13 Oct 2020 00:41:35 +0300 Subject: [PATCH 29/32] =?UTF-8?q?=D0=B0=D0=B3`=D0=BE=D0=BD=D0=B8=D0=B8?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 29 ++++++++++++- Makefile | 3 ++ app/deploy-cilium.sh | 2 +- app/deploy-voting-app.sh | 40 ++++++++++++++++++ app/voting/demo-ingress.yaml | 28 +++++++++++++ app/voting/deploy-voting-app.sh | 41 +++++++++++++++++++ app/voting/deployments/db-deployment.yaml | 34 +++++++++++++++ app/voting/deployments/nginx-deployment.yml | 20 +++++++++ app/voting/deployments/redis-deployment.yaml | 29 +++++++++++++ app/voting/deployments/result-deployment.yaml | 23 +++++++++++ app/voting/deployments/vote-deployment.yaml | 23 +++++++++++ app/voting/deployments/worker-deployment.yaml | 20 +++++++++ app/voting/services/db-service.yaml | 15 +++++++ app/voting/services/redis-service.yaml | 15 +++++++ app/voting/services/result-service.yaml | 16 ++++++++ app/voting/services/vote-service.yaml | 16 ++++++++ 16 files changed, 352 insertions(+), 2 deletions(-) create mode 100644 app/deploy-voting-app.sh create mode 100644 app/voting/demo-ingress.yaml create mode 100644 app/voting/deploy-voting-app.sh create mode 100644 app/voting/deployments/db-deployment.yaml create mode 100644 app/voting/deployments/nginx-deployment.yml create mode 100644 app/voting/deployments/redis-deployment.yaml create mode 100644 app/voting/deployments/result-deployment.yaml create mode 100644 app/voting/deployments/vote-deployment.yaml create mode 100644 app/voting/deployments/worker-deployment.yaml create mode 100644 app/voting/services/db-service.yaml create mode 100644 app/voting/services/redis-service.yaml create mode 100644 app/voting/services/result-service.yaml create mode 100644 app/voting/services/vote-service.yaml diff --git a/.travis.yml b/.travis.yml index f4cfbbb..e69547c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -334,8 +334,35 @@ matrix: # after_success: # - deactivate + - name: "voting-app w snapped kubectl helm Python 3.7 on bionic amd64" + dist: bionic + arch: amd64 + addons: + snaps: + - name: kubectl + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + - name: helm + confinement: classic # or devmode + channel: latest/stable # will be passed to --channel flag + language: python + python: 3.7 + before_install: + - pip3 install virtualenv + - virtualenv -p $(which python3) ~venvpy3 + - source ~venvpy3/bin/activate + - pip install -r requirements.txt + script: + - sudo make deploy-kind + - sudo kind create cluster --name tutorial-cluster + - sudo kubectl config use-context kind-tutorial-cluster + - sudo make deploy-chaosmesh + - sudo kind delete cluster --name tutorial-cluster + after_success: + - deactivate + # ######################## TESTED OK STARTS ################################# - - name: "chaos 
mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK
+  - name: "chaos mesh kind w snapped kubectl helm Python 3.7 on bionic amd64" #OK
     dist: bionic
     arch: amd64
     addons:
diff --git a/Makefile b/Makefile
index eb05f49..084523e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,9 @@
 IMAGE := alpine/fio
 APP:="app/deploy-openesb.sh"

+deploy-voting-app:
+	bash app/deploy-voting-app.sh
+
 deploy-chaosmesh:
 	bash app/deploy-chaosmesh.sh

diff --git a/app/deploy-cilium.sh b/app/deploy-cilium.sh
index 618c802..1026683 100644
--- a/app/deploy-cilium.sh
+++ b/app/deploy-cilium.sh
@@ -24,7 +24,7 @@ helm install cilium ./cilium \
   --set global.pullPolicy=IfNotPresent \
   --set config.ipam=kubernetes

-echo echo "Waiting for cilium to be ready ..."
+echo "Waiting for cilium to be ready ..."
 for i in {1..60}; do # Timeout after 5 minutes, 60x5=300 secs
   if kubectl get pods --namespace=kube-system | grep ContainerCreating ; then
     sleep 5
diff --git a/app/deploy-voting-app.sh b/app/deploy-voting-app.sh
new file mode 100644
index 0000000..da85447
--- /dev/null
+++ b/app/deploy-voting-app.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+echo "===============================deploy voting app==========================================================="
+
+cd app/voting
+kubectl create namespace vote
+kubectl create -f deployments/
+kubectl create -f services/
+minikube addons enable ingress
+kubectl apply -f demo-ingress.yaml
+kubectl --namespace=vote get ingress
+
+# Add the following line to the bottom of the /etc/hosts file
+# demo-kubernetes.info
+
+
+
+#Verify the sample application
+kubectl get service --all-namespaces #list all services in all namespace
+kubectl get services #The application will start as each pod becomes ready
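+
+# A single "kubectl wait" is an alternative to the polling loop below (a
+# sketch, not part of the original flow; it assumes the vote pods expose the
+# standard Ready condition and that 300s is a long enough budget):
+# kubectl wait --namespace=vote --for=condition=Ready pods --all --timeout=300s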
+kubectl get pods
+
+for i in {1..60}; do # Timeout after 5 minutes, 60x5=300 secs
+    if kubectl get pods --namespace=vote |grep ContainerCreating ; then
+        sleep 5
+    else
+        break
+    fi
+done
+
+kubectl get service --all-namespaces #list all services in all namespace
+# Verify your installation
+kubectl get pod -n vote
+
+kubectl delete namespace vote
\ No newline at end of file
diff --git a/app/voting/demo-ingress.yaml b/app/voting/demo-ingress.yaml
new file mode 100644
index 0000000..3c90565
--- /dev/null
+++ b/app/voting/demo-ingress.yaml
@@ -0,0 +1,28 @@
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+  name: demo-vote-ingress
+  namespace: vote
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    nginx.ingress.kubernetes.io/rewrite-target: /$1
+    nginx.ingress.kubernetes.io/configuration-snippet: |
+      location ~ \.css {
+        add_header Content-Type text/css;
+      }
+      location ~ \.js {
+        add_header Content-Type application/x-javascript;
+      }
+spec:
+  rules:
+  - host: demo-kubernetes.info
+    http:
+      paths:
+      - path: /vote/*
+        backend:
+          serviceName: vote
+          servicePort: 5000
+      - path: /static/*
+        backend:
+          serviceName: vote
+          servicePort: 5000
\ No newline at end of file
diff --git a/app/voting/deploy-voting-app.sh b/app/voting/deploy-voting-app.sh
new file mode 100644
index 0000000..7ddf76d
--- /dev/null
+++ b/app/voting/deploy-voting-app.sh
@@ -0,0 +1,41 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+# https://github.com/yosoyvilla/k8s-demo
+echo "===============================deploy voting app==========================================================="
+
+git clone https://github.com/yosoyvilla/k8s-demo.git && cd k8s-demo
+kubectl create namespace vote
+kubectl create -f deployments/
+kubectl create -f services/
+minikube addons enable ingress
+kubectl apply -f demo-ingress.yaml
+kubectl --namespace=vote get ingress
+
+# Add the following line to the bottom of the /etc/hosts file
+# demo-kubernetes.info
+
+
+
+#Deploy the sample application
+kubectl get service --all-namespaces #list all services in all namespace
+kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it.
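+
+# Optional smoke test (a sketch, not part of the original script): vote and
+# result are NodePort services on 31000 and 31001 (see services/*.yaml), so
+# they can be curled via a node address, assuming it is reachable from here:
+# NODE_IP=$(kubectl get nodes -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
+# curl -s "http://${NODE_IP}:31000" | head -n 5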
+kubectl get pods + +for i in {1..60}; do # Timeout after 5 minutes, 60x5=300 secs, 3 mins + if kubectl get pods --namespace=vote |grep ContainerCreating ; then + sleep 5 + else + break + fi +done + +kubectl get service --all-namespaces #list all services in all namespace +# Verify your installation +kubectl get pod -n vote + +kubectl delete namespace vote \ No newline at end of file diff --git a/app/voting/deployments/db-deployment.yaml b/app/voting/deployments/db-deployment.yaml new file mode 100644 index 0000000..7fb0e9b --- /dev/null +++ b/app/voting/deployments/db-deployment.yaml @@ -0,0 +1,34 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: db + name: db + namespace: vote +spec: + replicas: 1 + selector: + matchLabels: + app: db + template: + metadata: + labels: + app: db + spec: + containers: + - image: postgres:9.4 + name: postgres + env: + - name: POSTGRES_USER + value: postgres + - name: POSTGRES_PASSWORD + value: postgres + ports: + - containerPort: 5432 + name: postgres + volumeMounts: + - mountPath: /var/lib/postgresql/data + name: db-data + volumes: + - name: db-data + emptyDir: {} \ No newline at end of file diff --git a/app/voting/deployments/nginx-deployment.yml b/app/voting/deployments/nginx-deployment.yml new file mode 100644 index 0000000..e3b0a41 --- /dev/null +++ b/app/voting/deployments/nginx-deployment.yml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx + namespace: vote +spec: + replicas: 1 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - name: nginx + image: nginx:1.7.9 + ports: + - containerPort: 80 \ No newline at end of file diff --git a/app/voting/deployments/redis-deployment.yaml b/app/voting/deployments/redis-deployment.yaml new file mode 100644 index 0000000..f2b695d --- /dev/null +++ b/app/voting/deployments/redis-deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: redis + name: redis + namespace: vote +spec: + replicas: 1 + selector: + matchLabels: + app: redis + template: + metadata: + labels: + app: redis + spec: + containers: + - image: redis:alpine + name: redis + ports: + - containerPort: 6379 + name: redis + volumeMounts: + - mountPath: /data + name: redis-data + volumes: + - name: redis-data + emptyDir: {} \ No newline at end of file diff --git a/app/voting/deployments/result-deployment.yaml b/app/voting/deployments/result-deployment.yaml new file mode 100644 index 0000000..5f94703 --- /dev/null +++ b/app/voting/deployments/result-deployment.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: result + name: result + namespace: vote +spec: + replicas: 2 + selector: + matchLabels: + app: result + template: + metadata: + labels: + app: result + spec: + containers: + - image: dockersamples/examplevotingapp_result:before + name: result + ports: + - containerPort: 80 + name: result \ No newline at end of file diff --git a/app/voting/deployments/vote-deployment.yaml b/app/voting/deployments/vote-deployment.yaml new file mode 100644 index 0000000..fb97cb3 --- /dev/null +++ b/app/voting/deployments/vote-deployment.yaml @@ -0,0 +1,23 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: vote + name: vote + namespace: vote +spec: + replicas: 2 + selector: + matchLabels: + app: vote + template: + metadata: + labels: + app: vote + spec: + containers: + - image: dockersamples/examplevotingapp_vote + name: vote + ports: + - containerPort: 80 + 
name: vote \ No newline at end of file diff --git a/app/voting/deployments/worker-deployment.yaml b/app/voting/deployments/worker-deployment.yaml new file mode 100644 index 0000000..b785148 --- /dev/null +++ b/app/voting/deployments/worker-deployment.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: worker + name: worker + namespace: vote +spec: + replicas: 1 + selector: + matchLabels: + app: worker + template: + metadata: + labels: + app: worker + spec: + containers: + - image: dockersamples/examplevotingapp_worker + name: worker \ No newline at end of file diff --git a/app/voting/services/db-service.yaml b/app/voting/services/db-service.yaml new file mode 100644 index 0000000..e12636f --- /dev/null +++ b/app/voting/services/db-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: db + name: db + namespace: vote +spec: + type: ClusterIP + ports: + - name: "db-service" + port: 5432 + targetPort: 5432 + selector: + app: db \ No newline at end of file diff --git a/app/voting/services/redis-service.yaml b/app/voting/services/redis-service.yaml new file mode 100644 index 0000000..99071ba --- /dev/null +++ b/app/voting/services/redis-service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: redis + name: redis + namespace: vote +spec: + type: ClusterIP + ports: + - name: "redis-service" + port: 6379 + targetPort: 6379 + selector: + app: redis \ No newline at end of file diff --git a/app/voting/services/result-service.yaml b/app/voting/services/result-service.yaml new file mode 100644 index 0000000..6a5024e --- /dev/null +++ b/app/voting/services/result-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: result + name: result + namespace: vote +spec: + type: NodePort + ports: + - name: "result-service" + port: 5001 + targetPort: 80 + nodePort: 31001 + selector: + app: result \ No newline at end of file diff --git a/app/voting/services/vote-service.yaml b/app/voting/services/vote-service.yaml new file mode 100644 index 0000000..5034a72 --- /dev/null +++ b/app/voting/services/vote-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: vote + name: vote + namespace: vote +spec: + type: NodePort + ports: + - name: "vote-service" + port: 5000 + targetPort: 80 + nodePort: 31000 + selector: + app: vote \ No newline at end of file From e9d7bcc1c9f1591e7d061b2f8bd7db4c1443cd67 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Tue, 13 Oct 2020 16:11:22 +0300 Subject: [PATCH 30/32] voting-app --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index e69547c..a54fd66 100644 --- a/.travis.yml +++ b/.travis.yml @@ -384,7 +384,7 @@ matrix: - sudo make deploy-kind - sudo kind create cluster --name tutorial-cluster - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-chaosmesh + - sudo make deploy-voting-app - sudo kind delete cluster --name tutorial-cluster after_success: - deactivate From 66c4432c7ce72a24fa599faea6b59ce2337cd813 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Tue, 13 Oct 2020 16:20:48 +0300 Subject: [PATCH 31/32] =?UTF-8?q?=09=D0=B0=D0=B3`=D0=BE=D0=BD=D0=B8=D0=B5?= =?UTF-8?q?=D0=B9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .travis.yml | 54 ++++++++++++++++----------------- app/deploy-voting-app.sh | 2 +- app/voting/deploy-voting-app.sh | 41 ------------------------- 3 files changed, 28 
insertions(+), 69 deletions(-) delete mode 100644 app/voting/deploy-voting-app.sh diff --git a/.travis.yml b/.travis.yml index a54fd66..f972bed 100644 --- a/.travis.yml +++ b/.travis.yml @@ -356,38 +356,38 @@ matrix: - sudo make deploy-kind - sudo kind create cluster --name tutorial-cluster - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-chaosmesh + - sudo make deploy-voting-app - sudo kind delete cluster --name tutorial-cluster after_success: - deactivate # ######################## TESTED OK STARTS ################################# - - name: "chaos mesh kind w snapped kubectl helm Python 3.7 on bionic amd64" #OK - dist: bionic - arch: amd64 - addons: - snaps: - - name: kubectl - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - - name: helm - confinement: classic # or devmode - channel: latest/stable # will be passed to --channel flag - language: python - python: 3.7 - before_install: - - pip3 install virtualenv - - virtualenv -p $(which python3) ~venvpy3 - - source ~venvpy3/bin/activate - - pip install -r requirements.txt - script: - - sudo make deploy-kind - - sudo kind create cluster --name tutorial-cluster - - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-voting-app - - sudo kind delete cluster --name tutorial-cluster - after_success: - - deactivate + # - name: "chaos mesh kind w snapped kubectl helm Python 3.7 on bionic amd64" #OK + # dist: bionic + # arch: amd64 + # addons: + # snaps: + # - name: kubectl + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # - name: helm + # confinement: classic # or devmode + # channel: latest/stable # will be passed to --channel flag + # language: python + # python: 3.7 + # before_install: + # - pip3 install virtualenv + # - virtualenv -p $(which python3) ~venvpy3 + # - source ~venvpy3/bin/activate + # - pip install -r requirements.txt + # script: + # - sudo make deploy-kind + # - sudo kind create cluster --name tutorial-cluster + # - sudo kubectl config use-context kind-tutorial-cluster + # - sudo make deploy-chaosmesh + # - sudo kind delete cluster --name tutorial-cluster + # after_success: + # - deactivate # # #https://docs.cilium.io/en/latest/gettingstarted/kind/ diff --git a/app/deploy-voting-app.sh b/app/deploy-voting-app.sh index da85447..1cc0313 100644 --- a/app/deploy-voting-app.sh +++ b/app/deploy-voting-app.sh @@ -11,7 +11,7 @@ cd app/voting kubectl create namespace vote kubectl create -f deployments/ kubectl create -f services/ -minikube addons enable ingress +# minikube addons enable ingress kubectl apply -f demo-ingress.yaml kubectl --namespace=vote get ingress diff --git a/app/voting/deploy-voting-app.sh b/app/voting/deploy-voting-app.sh deleted file mode 100644 index 7ddf76d..0000000 --- a/app/voting/deploy-voting-app.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash -set -o errexit -set -o pipefail -set -o nounset -set -o xtrace -# set -eox pipefail #safety for script - -# https://github.com/yosoyvilla/k8s-demo -echo "===============================deploy voting app===========================================================" - -git clone https://github.com/yosoyvilla/k8s-demo.git && cd k8s-demo -kubectl create namespace vote -kubectl create -f deployments/ -kubectl create -f services/ -minikube addons enable ingress -kubectl apply -f demo-ingress.yaml -kubectl --namespace=vote get ingress - -# Add the following line to the bottom of the /etc/hosts file -# demo-kubernetes.info - - - 
-#Deploy the sample application -kubectl get service --all-namespaces #list all services in all namespace -kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it. -kubectl get pods - -for i in {1..60}; do # Timeout after 5 minutes, 60x5=300 secs, 3 mins - if kubectl get pods --namespace=vote |grep ContainerCreating ; then - sleep 5 - else - break - fi -done - -kubectl get service --all-namespaces #list all services in all namespace -# Verify your installation -kubectl get pod -n vote - -kubectl delete namespace vote \ No newline at end of file From 02063994339c71a419e76fc58d1f7e2ef6570dd4 Mon Sep 17 00:00:00 2001 From: githubfoam Date: Wed, 14 Oct 2020 14:13:59 +0300 Subject: [PATCH 32/32] init --- .travis.yml | 930 +--------------------------------------------------- 1 file changed, 3 insertions(+), 927 deletions(-) diff --git a/.travis.yml b/.travis.yml index f972bed..50000f7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,340 +1,17 @@ --- sudo: required -dist: bionic +dist: focal notifications: slack: on_failure: always -#https://istio.io/docs/setup/platform-setup/gardener/ -#https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md -fleet_script_gardener_macos_tasks : &fleet_script_gardener_macos_tasks #If you are running minikube within a VM, consider using --driver=none - script: - # Install kind via brew - - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" #Install brew - - brew install kubernetes-cli #Installing kubectl and helm - - brew install kubernetes-helm - - brew install git #Installing git - - brew install openvpn #Installing openvpn - - export PATH=$(brew --prefix openvpn)/sbin:$PATH - #Alternatively, you can also install Docker for Desktop and kind. - #Installing Minikube - - brew install minikube #https://minikube.sigs.k8s.io/docs/start/ - - which minikube - #Alternatively,Installing Minikube - # - curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64 - # - sudo install minikube-darwin-amd64 /usr/local/bin/minikube - # - which minikube - # - brew cask remove minikube #If which minikube fails after installation via brew, you may have to remove the minikube cask and link the binary - # - brew link minikube - - brew install iproute2mac #Installing iproute2 - - go get -u github.com/bronze1man/yaml2json #Installing yaml2json and jq - - brew install jq - # - brew install coreutils gnu-sed #Install GNU core utilities,Error: coreutils 8.31 is already installed - #Local Gardener setup - - git clone git@github.com:gardener/gardener.git && cd gardener - # Using the nodeless cluster setup,Setting up a local nodeless Garden cluster is quite simple - # The only prerequisite is a running docker daemon. 
Just use the provided Makefile rules to start your local Garden - - make local-garden-up #start all minimally required components of a Kubernetes cluster (etcd, kube-apiserver, kube-controller-manager) and an etcd Instance for the gardener-apiserver as Docker containers - - make local-garden-down #tear down the local Garden cluster and remove the Docker containers - # istio Kubernetes Gardener Bootstrapping Gardener #https://istio.io/docs/setup/platform-setup/gardener/ - #Install and configure kubectl https://kubernetes.io/docs/tasks/tools/install-kubectl/ - - curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl" - - chmod +x ./kubectl #Make the kubectl binary executable - - sudo mv ./kubectl /usr/local/bin/kubectl #Move the binary in to your PATH - - kubectl version --client #Test to ensure the version you installed is up-to-date - matrix: fast_finish: true include: - #https://docs.cilium.io/en/latest/gettingstarted/kind/ - # - name: "simulate Cluster Mesh in a sandbox cilium kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name=cluster1 --config=app/kind-cluster1.yaml - # - sudo kind create cluster --name=cluster2 --config=app/kind-cluster2.yaml - # - sudo make deploy-cluster-mesh - # # - sudo kind delete cluster --name cilium-testing - # after_success: - # - deactivate - - #https://docs.cilium.io/en/latest/gettingstarted/kind/ - # - name: "cilium hubble distributed kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --config=app/kind-config.yaml - # - sudo kubectl cluster-info --context kind-kind - # - sudo make deploy-cilium-hubble-dist - # # - sudo kind delete cluster --name cilium-testing - # after_success: - # - deactivate - - - # - name: "ELK kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo 
kind create cluster --name tutorial-cluster - # - sudo kubectl config use-context kind-tutorial-cluster - # - sudo make deploy-elk - # - sudo kind delete cluster --name tutorial-cluster - # after_success: - # - deactivate - - - # - name: "ELK kind wo snaps Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # # addons: - # # snaps: - # # - name: kubectl - # # confinement: classic # or devmode - # # channel: latest/stable # will be passed to --channel flag - # # - name: helm - # # confinement: classic # or devmode - # # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name tutorial-cluster - # - sudo kubectl config use-context kind-tutorial-cluster - # - sudo make provision-kubectl - # - sudo make provision-helm - # - sudo make deploy-elk - # - sudo kind delete cluster --name tutorial-cluster - # after_success: - # - deactivate - - - # - name: "EFK kind wo snaps Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # # addons: - # # snaps: - # # - name: kubectl - # # confinement: classic # or devmode - # # channel: latest/stable # will be passed to --channel flag - # # - name: helm - # # confinement: classic # or devmode - # # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name tutorial-cluster - # - sudo kubectl config use-context kind-tutorial-cluster - # - sudo make provision-kubectl - # - sudo make provision-helm - # - sudo make deploy-efk - # - sudo kind delete cluster --name tutorial-cluster - # after_success: - # - deactivate - - # - name: "EFK kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name tutorial-cluster - # - sudo kubectl config use-context kind-tutorial-cluster - # - sudo make deploy-efk - # - sudo kind delete cluster --name tutorial-cluster - # after_success: - # - deactivate - - # - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name 
tutorial-cluster - # - sudo kubectl config use-context kind-tutorial-cluster - # - sudo make deploy-microservices - # - sudo kind delete cluster --name tutorial-cluster - # after_success: - # - deactivate - - - - - - - # - name: "istio service mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --config=app/kind-config.yaml - # - sudo kubectl cluster-info --context kind-kind - # - sudo make deploy-istio - # # - sudo kind delete cluster --name cilium-testing - # after_success: - # - deactivate - - #MOVED - # - name: "openfaas w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name openfaas-testing - # - sudo kubectl config use-context kind-openfaas-testing - # - sudo make deploy-openfaas - # - sudo kind delete cluster --name openfaas-testing - # after_success: - # - deactivate - - # - name: "kubesec w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: go - # # before_install: - # # - pip3 install virtualenv - # # - virtualenv -p $(which python3) ~venvpy3 - # # - source ~venvpy3/bin/activate - # # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name kubesec-testing - # - sudo kubectl config use-context kind-kubesec-testing - # - go version - # - go get -u github.com/controlplaneio/kubesec/cmd/kubesec - # - sudo make deploy-kubesec - # - sudo kind delete cluster --name kubesec-testing - # after_success: - # - deactivate - - name: "voting-app w snapped kubectl helm Python 3.7 on bionic amd64" + - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK dist: bionic arch: amd64 addons: @@ -356,609 +33,8 @@ matrix: - sudo make deploy-kind - sudo kind create cluster --name tutorial-cluster - sudo kubectl config use-context kind-tutorial-cluster - - sudo make deploy-voting-app + - sudo make deploy-microservices - sudo kind delete cluster --name tutorial-cluster after_success: - deactivate -# ######################## TESTED OK STARTS ################################# - # - name: "chaos mesh kind w snapped kubectl helm Python 3.7 on bionic amd64" #OK - # dist: bionic - # arch: amd64 - # addons: - # 
snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name tutorial-cluster - # - sudo kubectl config use-context kind-tutorial-cluster - # - sudo make deploy-chaosmesh - # - sudo kind delete cluster --name tutorial-cluster - # after_success: - # - deactivate - - # - # #https://docs.cilium.io/en/latest/gettingstarted/kind/ - # - name: "cilium hubble local kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --config=app/kind-config.yaml - # - sudo kubectl cluster-info --context kind-kind - # - sudo make deploy-cilium-hubble-local - # # - sudo kind delete cluster --name cilium-testing - # after_success: - # - deactivate - # - # #https://docs.cilium.io/en/latest/gettingstarted/kind/ - # - name: "cilium kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --config=app/kind-config.yaml - # - sudo kubectl cluster-info --context kind-kind - # - sudo make deploy-cilium - # # - sudo kind delete cluster --name cilium-testing - # after_success: - # - deactivate - # - - # - name: "kubeflow kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # # <<: *fleet_install_tasks - # # <<: *fleet_script_tasks - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name kubeflow-testing - # - sudo kubectl config use-context kind-kubeflow-testing - # - sudo make deploy-kubeflow - # - sudo kind delete cluster --name kubeflow-testing - # after_success: - # - deactivate - # - # - name: "weavescope kind w snapped kubectl helm 
Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # # <<: *fleet_install_tasks - # # <<: *fleet_script_tasks - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name weavescope-testing - # - sudo kubectl config use-context kind-weavescope-testing - # - sudo make deploy-weavescope - # - sudo kind delete cluster --name weavescope-testing - # after_success: - # - deactivate - # - # - name: "kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # - pip install -r requirements.txt - # # <<: *fleet_install_tasks - # # <<: *fleet_script_tasks - # script: - # - sudo make deploy-kind - # after_success: - # - deactivate - # - # - name: "openesb kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name openesb-testing - # - sudo kubectl config use-context kind-openesb-testing - # - sudo make deploy-openesb - # - sudo kind delete cluster --name openesb-testing - # after_success: - # - deactivate - # - # - name: "k8s dashboard kind w snapped kubectl helm Python 3.7 on bionic" #OK - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # - name: helm - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # script: - # - sudo make deploy-kind - # - sudo kind create cluster --name dashboard-testing - # - sudo kubectl config use-context kind-dashboard-testing - # - sudo make deploy-dashboard - # - sudo kind delete cluster --name dashboard-testing - # after_success: - # - deactivate -# ######################################################### - # - name: "kind gardener Python 3.7 on bionic" - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or 
devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_dashboard_tasks - # after_success: - # - deactivate - # - # - name: "kind istio Python 3.7 on bionic" - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_kind_istio_tasks - # after_success: - # - deactivate - # - # - # - name: "kind Python 3.7 on bionic" - # dist: bionic - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_kind_istio_tasks - # after_success: - # - deactivate - # - # - name: "kind gardener Python 3.7 on xenial" - # dist: xenial - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_dashboard_tasks - # after_success: - # - deactivate - # - # - name: "kind istio Python 3.7 on xenial" - # dist: xenial - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_kind_istio_tasks - # after_success: - # - deactivate - # - # - name: "kind Python 3.7 on xenial" - # dist: xenial - # arch: amd64 - # addons: - # snaps: - # - name: kubectl - # confinement: classic # or devmode - # channel: latest/stable # will be passed to --channel flag - # language: python - # python: 3.7 - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_dashboard_tasks - # after_success: - # - deactivate - - # - name: "Python 3.7 on bionic arm64" # package architecture (amd64) does not match system (arm64) - # os: linux - # arch: arm64 - # dist: bionic - # language: python - # python: 3.7 - # # env: - # # - LIB_PATH="/usr/bin/shared/x86_64/v1" - # # compiler: - # # - gcc - # # - clang - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_vagrant_tasks - # after_success: - # - deactivate - # - # - name: 
"Python 3.7 on bionic ppc64le" #Unable to locate package osquery - # os: linux - # arch: ppc64le - # dist: bionic - # language: python - # python: 3.7 - # # env: - # # - LIB_PATH="/usr/bin/shared/x86_64/v1" - # # compiler: - # # - gcc - # # - clang - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_vagrant_tasks - # after_success: - # - deactivate - # - # - name: "Python 3.7 on bionic s390x" #Unable to locate package osquery - # os: linux - # arch: s390x - # dist: bionic - # language: python - # python: 3.7 - # # env: - # # - LIB_PATH="/usr/bin/shared/x86_64/v1" - # # compiler: - # # - gcc - # # - clang - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_vagrant_tasks - # after_success: - # - deactivate - - - # - name: "Python 2.7 on xenial amd64" - # dist: xenial - # language: python - # python: 2.7 - # before_install: - # - pip install virtualenv - # - virtualenv -p $(which python2) ~venvpy2 - # - source ~venvpy2/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # - # after_success: - # - deactivate - - # - name: "Python 3.7 on xenial arm64" - # os: linux - # arch: arm64 - # dist: xenial - # language: python - # python: 3.7 - # # env: - # # - LIB_PATH="/usr/bin/shared/x86_64/v1" - # # compiler: - # # - gcc - # # - clang - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_vagrant_tasks - # after_success: - # - deactivate - # - # - name: "Python 3.7 on xenial ppc64le" #Unable to locate package osquery - # os: linux - # arch: ppc64le - # dist: xenial - # language: python - # python: 3.7 - # # env: - # # - LIB_PATH="/usr/bin/shared/x86_64/v1" - # # compiler: - # # - gcc - # # - clang - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_vagrant_tasks - # after_success: - # - deactivate - # - # - name: "Python 3.7 on xenial s390x" #Unable to locate package osquery - # os: linux - # arch: s390x - # dist: xenial - # language: python - # python: 3.7 - # # env: - # # - LIB_PATH="/usr/bin/shared/x86_64/v1" - # # compiler: - # # - gcc - # # - clang - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_vagrant_tasks - # after_success: - # - deactivate - - - - - # - name: "kind brew Python 2.7.17 on macOS xcode10.2" - # os: osx - # osx_image: xcode10.2 - # language: shell - # before_install: - # - pip install virtualenv - # - virtualenv -p $(which python2) ~venvpy2 - # - source ~venvpy2/bin/activate - # # Install kind via brew - # - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" #Install brew - # - brew install kind - # - kind create cluster # Default cluster context name is `kind`. 
- # - kind create cluster --name kind-2 - # - kind get clusters # list kind clusters - # - kubectl cluster-info --context kind-kind #In order to interact with a specific cluster, you only need to specify the cluster name as a context in kubectl - # - kind load docker-image hello-world #Docker images can be loaded into your cluster nodes - # # - kind load image-archive /my-image-archive.tar #image archives can be loaded - # # custom DockerFile build workflow starts, don't use a :latest tag - # # - docker build -t my-custom-image:unique-tag ./my-image-dir - # # - kind load docker-image my-custom-image:unique-tag - # # - kubectl apply -f my-manifest-using-my-image:unique-tag - # # - docker exec -it my-node-name crictl images # get a list of images present on a cluster node,my-node-name is the name of the Docker container - # # - kind build node-image --type bazel #by using docker or bazel. To specify the build type use the flag --type - # # custom DockerFile build workflow ends - # - kubectl cluster-info --context kind-kind-2 - # - kind load docker-image hello-world --name kind-2 #If using a named cluster you will need to specify the name of the cluster you wish to load the image into - # - kind delete cluster #If the flag --name is not specified, kind uses the default cluster context name kind and deletes that cluster - # - kind delete cluster --name kind-2 - # - kind get clusters # list kind clusters - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # after_success: - # - deactivate - - # - name: "gardener Python 2.7.17 on macOS xcode10.2" - # os: osx - # osx_image: xcode10.2 - # language: shell - # before_install: - # - pip install virtualenv - # - virtualenv -p $(which python2) ~venvpy2 - # - source ~venvpy2/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # <<: *fleet_script_gardener_macos_tasks - # after_success: - # - deactivate - - - - - # - name: "Python 3.7.5 on macOS xcode10.2" - # os: osx - # osx_image: xcode10.2 - # language: shell - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # after_success: - # - deactivate - # - # - name: "Python 3.7.5 on macOS xcode9.4 " - # os: osx - # osx_image: xcode9.4 - # language: shell - # before_install: - # - pip3 install virtualenv - # - virtualenv -p $(which python3) ~venvpy3 - # - source ~venvpy3/bin/activate - # <<: *fleet_install_tasks - # <<: *fleet_script_tasks - # after_success: - # - deactivate - - - - # - name: "kind chocolatey Python 3.8 on Windows" - # os: windows - # language: shell - # env: - # - PATH=/c/Python38:/c/Python38/Scripts:$PATH - # before_install: - # - choco install python --version 3.8.1 - # - pip install virtualenv - # - virtualenv $HOME/venv - # - source $HOME/venv/Scripts/activate - # # Install kind via chocolatey - # # - Get-ExecutionPolicy #If it returns Restricted, then run Set-ExecutionPolicy AllSigned or Set-ExecutionPolicy Bypass -Scope Process. 
# # - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
    # - choco install kind
    # <<: *fleet_install_tasks
    # <<: *fleet_script_tasks
    # after_success:
    # - deactivate

    # - name: "Python 3.7 on Windows"
    # os: windows
    # language: shell
    # env: PATH=/c/Python37:/c/Python37/Scripts:$PATH
    # before_install:
    # - choco install python --version 3.7.3
    # - python -m pip install virtualenv
    # - virtualenv $HOME/venv
    # - source $HOME/venv/Scripts/activate
    # <<: *fleet_install_tasks
    # <<: *fleet_script_tasks
    # after_success:
    # - deactivate