diff --git a/.travis.yml b/.travis.yml
index 3806e62..50000f7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,137 +1,17 @@
---
sudo: required
-dist: bionic
+dist: focal
notifications:
slack:
on_failure: always
-#https://istio.io/docs/setup/platform-setup/gardener/
-#https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md
-fleet_script_gardener_macos_tasks : &fleet_script_gardener_macos_tasks #If you are running minikube within a VM, consider using --driver=none
- script:
- # Install kind via brew
- - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" #Install brew
- - brew install kubernetes-cli #Installing kubectl and helm
- - brew install kubernetes-helm
- - brew install git #Installing git
- - brew install openvpn #Installing openvpn
- - export PATH=$(brew --prefix openvpn)/sbin:$PATH
- #Alternatively, you can also install Docker for Desktop and kind.
- #Installing Minikube
- - brew install minikube #https://minikube.sigs.k8s.io/docs/start/
- - which minikube
- #Alternatively,Installing Minikube
- # - curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube-darwin-amd64
- # - sudo install minikube-darwin-amd64 /usr/local/bin/minikube
- # - which minikube
- # - brew cask remove minikube #If which minikube fails after installation via brew, you may have to remove the minikube cask and link the binary
- # - brew link minikube
- - brew install iproute2mac #Installing iproute2
- - go get -u github.com/bronze1man/yaml2json #Installing yaml2json and jq
- - brew install jq
- # - brew install coreutils gnu-sed #Install GNU core utilities,Error: coreutils 8.31 is already installed
- #Local Gardener setup
- - git clone git@github.com:gardener/gardener.git && cd gardener
- # Using the nodeless cluster setup,Setting up a local nodeless Garden cluster is quite simple
- # The only prerequisite is a running docker daemon. Just use the provided Makefile rules to start your local Garden
- - make local-garden-up #start all minimally required components of a Kubernetes cluster (etcd, kube-apiserver, kube-controller-manager) and an etcd Instance for the gardener-apiserver as Docker containers
- - make local-garden-down #tear down the local Garden cluster and remove the Docker containers
- # istio Kubernetes Gardener Bootstrapping Gardener #https://istio.io/docs/setup/platform-setup/gardener/
- #Install and configure kubectl https://kubernetes.io/docs/tasks/tools/install-kubectl/
- - curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/darwin/amd64/kubectl"
- - chmod +x ./kubectl #Make the kubectl binary executable
- - sudo mv ./kubectl /usr/local/bin/kubectl #Move the binary in to your PATH
- - kubectl version --client #Test to ensure the version you installed is up-to-date
-
matrix:
fast_finish: true
include:
- #https://docs.cilium.io/en/latest/gettingstarted/kind/
- # - name: "simulate Cluster Mesh in a sandbox cilium kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --name=cluster1 --config=app/kind-cluster1.yaml
- # - sudo kind create cluster --name=cluster2 --config=app/kind-cluster2.yaml
- # - sudo make deploy-cluster-mesh
- # # - sudo kind delete cluster --name cilium-testing
- # after_success:
- # - deactivate
-
- #https://docs.cilium.io/en/latest/gettingstarted/kind/
- # - name: "cilium hubble distributed kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --config=app/kind-config.yaml
- # - sudo kubectl cluster-info --context kind-kind
- # - sudo make deploy-cilium-hubble-dist
- # # - sudo kind delete cluster --name cilium-testing
- # after_success:
- # - deactivate
-
- # - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --config=app/kind-config.yaml
- # - sudo kubectl cluster-info --context kind-kind
- # - sudo make deploy-microservices
- # after_success:
- # - deactivate
-
- - name: "istio service mesh kind w snapped kubectl helm Python 3.7 on bionic" #OK
+ - name: "microservices kind w snapped kubectl helm Python 3.7 on bionic" #OK
dist: bionic
arch: amd64
addons:
@@ -151,584 +31,10 @@ matrix:
- pip install -r requirements.txt
script:
- sudo make deploy-kind
- - sudo kind create cluster --config=app/kind-config.yaml
- - sudo kubectl cluster-info --context kind-kind
- - sudo make deploy-istio
- # - sudo kind delete cluster --name cilium-testing
+ - sudo kind create cluster --name tutorial-cluster
+ - sudo kubectl config use-context kind-tutorial-cluster
+ - sudo make deploy-microservices
+ - sudo kind delete cluster --name tutorial-cluster
after_success:
- deactivate
-# ######################## OK #################################
- #
- # #https://docs.cilium.io/en/latest/gettingstarted/kind/
- # - name: "cilium hubble local kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --config=app/kind-config.yaml
- # - sudo kubectl cluster-info --context kind-kind
- # - sudo make deploy-cilium-hubble-local
- # # - sudo kind delete cluster --name cilium-testing
- # after_success:
- # - deactivate
- #
- # #https://docs.cilium.io/en/latest/gettingstarted/kind/
- # - name: "cilium kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --config=app/kind-config.yaml
- # - sudo kubectl cluster-info --context kind-kind
- # - sudo make deploy-cilium
- # # - sudo kind delete cluster --name cilium-testing
- # after_success:
- # - deactivate
- #
-
- # - name: "kubeflow kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # # <<: *fleet_install_tasks
- # # <<: *fleet_script_tasks
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --name kubeflow-testing
- # - sudo kubectl config use-context kind-kubeflow-testing
- # - sudo make deploy-kubeflow
- # - sudo kind delete cluster --name kubeflow-testing
- # after_success:
- # - deactivate
- #
- # - name: "weavescope kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # # <<: *fleet_install_tasks
- # # <<: *fleet_script_tasks
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --name weavescope-testing
- # - sudo kubectl config use-context kind-weavescope-testing
- # - sudo make deploy-weavescope
- # - sudo kind delete cluster --name weavescope-testing
- # after_success:
- # - deactivate
- #
- # - name: "kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # - pip install -r requirements.txt
- # # <<: *fleet_install_tasks
- # # <<: *fleet_script_tasks
- # script:
- # - sudo make deploy-kind
- # after_success:
- # - deactivate
- #
- # - name: "openesb kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --name openesb-testing
- # - sudo kubectl config use-context kind-openesb-testing
- # - sudo make deploy-openesb
- # - sudo kind delete cluster --name openesb-testing
- # after_success:
- # - deactivate
- #
- # - name: "k8s dashboard kind w snapped kubectl helm Python 3.7 on bionic" #OK
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # - name: helm
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # script:
- # - sudo make deploy-kind
- # - sudo kind create cluster --name dashboard-testing
- # - sudo kubectl config use-context kind-dashboard-testing
- # - sudo make deploy-dashboard
- # - sudo kind delete cluster --name dashboard-testing
- # after_success:
- # - deactivate
-# #########################################################
- # - name: "kind gardener Python 3.7 on bionic"
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_dashboard_tasks
- # after_success:
- # - deactivate
- #
- # - name: "kind istio Python 3.7 on bionic"
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_kind_istio_tasks
- # after_success:
- # - deactivate
- #
- #
- # - name: "kind Python 3.7 on bionic"
- # dist: bionic
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_kind_istio_tasks
- # after_success:
- # - deactivate
- #
- # - name: "kind gardener Python 3.7 on xenial"
- # dist: xenial
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_dashboard_tasks
- # after_success:
- # - deactivate
- #
- # - name: "kind istio Python 3.7 on xenial"
- # dist: xenial
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_kind_istio_tasks
- # after_success:
- # - deactivate
- #
- # - name: "kind Python 3.7 on xenial"
- # dist: xenial
- # arch: amd64
- # addons:
- # snaps:
- # - name: kubectl
- # confinement: classic # or devmode
- # channel: latest/stable # will be passed to --channel flag
- # language: python
- # python: 3.7
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_dashboard_tasks
- # after_success:
- # - deactivate
-
- # - name: "Python 3.7 on bionic arm64" # package architecture (amd64) does not match system (arm64)
- # os: linux
- # arch: arm64
- # dist: bionic
- # language: python
- # python: 3.7
- # # env:
- # # - LIB_PATH="/usr/bin/shared/x86_64/v1"
- # # compiler:
- # # - gcc
- # # - clang
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_vagrant_tasks
- # after_success:
- # - deactivate
- #
- # - name: "Python 3.7 on bionic ppc64le" #Unable to locate package osquery
- # os: linux
- # arch: ppc64le
- # dist: bionic
- # language: python
- # python: 3.7
- # # env:
- # # - LIB_PATH="/usr/bin/shared/x86_64/v1"
- # # compiler:
- # # - gcc
- # # - clang
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_vagrant_tasks
- # after_success:
- # - deactivate
- #
- # - name: "Python 3.7 on bionic s390x" #Unable to locate package osquery
- # os: linux
- # arch: s390x
- # dist: bionic
- # language: python
- # python: 3.7
- # # env:
- # # - LIB_PATH="/usr/bin/shared/x86_64/v1"
- # # compiler:
- # # - gcc
- # # - clang
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_vagrant_tasks
- # after_success:
- # - deactivate
-
-
- # - name: "Python 2.7 on xenial amd64"
- # dist: xenial
- # language: python
- # python: 2.7
- # before_install:
- # - pip install virtualenv
- # - virtualenv -p $(which python2) ~venvpy2
- # - source ~venvpy2/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- #
- # after_success:
- # - deactivate
-
- # - name: "Python 3.7 on xenial arm64"
- # os: linux
- # arch: arm64
- # dist: xenial
- # language: python
- # python: 3.7
- # # env:
- # # - LIB_PATH="/usr/bin/shared/x86_64/v1"
- # # compiler:
- # # - gcc
- # # - clang
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_vagrant_tasks
- # after_success:
- # - deactivate
- #
- # - name: "Python 3.7 on xenial ppc64le" #Unable to locate package osquery
- # os: linux
- # arch: ppc64le
- # dist: xenial
- # language: python
- # python: 3.7
- # # env:
- # # - LIB_PATH="/usr/bin/shared/x86_64/v1"
- # # compiler:
- # # - gcc
- # # - clang
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_vagrant_tasks
- # after_success:
- # - deactivate
- #
- # - name: "Python 3.7 on xenial s390x" #Unable to locate package osquery
- # os: linux
- # arch: s390x
- # dist: xenial
- # language: python
- # python: 3.7
- # # env:
- # # - LIB_PATH="/usr/bin/shared/x86_64/v1"
- # # compiler:
- # # - gcc
- # # - clang
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_vagrant_tasks
- # after_success:
- # - deactivate
-
-
-
-
- # - name: "kind brew Python 2.7.17 on macOS xcode10.2"
- # os: osx
- # osx_image: xcode10.2
- # language: shell
- # before_install:
- # - pip install virtualenv
- # - virtualenv -p $(which python2) ~venvpy2
- # - source ~venvpy2/bin/activate
- # # Install kind via brew
- # - /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)" #Install brew
- # - brew install kind
- # - kind create cluster # Default cluster context name is `kind`.
- # - kind create cluster --name kind-2
- # - kind get clusters # list kind clusters
- # - kubectl cluster-info --context kind-kind #In order to interact with a specific cluster, you only need to specify the cluster name as a context in kubectl
- # - kind load docker-image hello-world #Docker images can be loaded into your cluster nodes
- # # - kind load image-archive /my-image-archive.tar #image archives can be loaded
- # # custom DockerFile build workflow starts, don't use a :latest tag
- # # - docker build -t my-custom-image:unique-tag ./my-image-dir
- # # - kind load docker-image my-custom-image:unique-tag
- # # - kubectl apply -f my-manifest-using-my-image:unique-tag
- # # - docker exec -it my-node-name crictl images # get a list of images present on a cluster node,my-node-name is the name of the Docker container
- # # - kind build node-image --type bazel #by using docker or bazel. To specify the build type use the flag --type
- # # custom DockerFile build workflow ends
- # - kubectl cluster-info --context kind-kind-2
- # - kind load docker-image hello-world --name kind-2 #If using a named cluster you will need to specify the name of the cluster you wish to load the image into
- # - kind delete cluster #If the flag --name is not specified, kind uses the default cluster context name kind and deletes that cluster
- # - kind delete cluster --name kind-2
- # - kind get clusters # list kind clusters
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # after_success:
- # - deactivate
-
- # - name: "gardener Python 2.7.17 on macOS xcode10.2"
- # os: osx
- # osx_image: xcode10.2
- # language: shell
- # before_install:
- # - pip install virtualenv
- # - virtualenv -p $(which python2) ~venvpy2
- # - source ~venvpy2/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # <<: *fleet_script_gardener_macos_tasks
- # after_success:
- # - deactivate
-
-
-
-
- # - name: "Python 3.7.5 on macOS xcode10.2"
- # os: osx
- # osx_image: xcode10.2
- # language: shell
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # after_success:
- # - deactivate
- #
- # - name: "Python 3.7.5 on macOS xcode9.4 "
- # os: osx
- # osx_image: xcode9.4
- # language: shell
- # before_install:
- # - pip3 install virtualenv
- # - virtualenv -p $(which python3) ~venvpy3
- # - source ~venvpy3/bin/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # after_success:
- # - deactivate
-
-
-
- # - name: "kind chocolatey Python 3.8 on Windows"
- # os: windows
- # language: shell
- # env:
- # - PATH=/c/Python38:/c/Python38/Scripts:$PATH
- # before_install:
- # - choco install python --version 3.8.1
- # - pip install virtualenv
- # - virtualenv $HOME/venv
- # - source $HOME/venv/Scripts/activate
- # # Install kind via chocolatey
- # # - Get-ExecutionPolicy #If it returns Restricted, then run Set-ExecutionPolicy AllSigned or Set-ExecutionPolicy Bypass -Scope Process.
- # # - Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
- # - choco install kind
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # after_success:
- # - deactivate
-
- # - name: "Python 3.7 on Windows"
- # os: windows
- # language: shell
- # env: PATH=/c/Python37:/c/Python37/Scripts:$PATH
- # before_install:
- # - choco install python --version 3.7.3
- # - python -m pip install virtualenv
- # - virtualenv $HOME/venv
- # - source $HOME/venv/Scripts/activate
- # <<: *fleet_install_tasks
- # <<: *fleet_script_tasks
- # after_success:
- # - deactivate
diff --git a/Makefile b/Makefile
index 52fa1d7..084523e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,30 +1,66 @@
IMAGE := alpine/fio
APP:="app/deploy-openesb.sh"
+deploy-voting-app:
+ bash app/deploy-voting-app.sh
+
+deploy-chaosmesh:
+ bash app/deploy-chaosmesh.sh
+
+deploy-elk:
+ bash app/deploy-elk.sh
+
+deploy-efk:
+ bash app/deploy-efk.sh
+
+provision-helm:
+ bash app/provision-helm.sh
+
+provision-kubectl:
+ bash app/provision-kubectl.sh
+
+deploy-openfaas:
+ bash app/deploy-openfaas.sh
+
+deploy-kubesec:
+ bash app/deploy-kubesec.sh
+
deploy-kind:
bash deploy-kind.sh
+
deploy-microservices:
bash app/deploy-microservices.sh
+
deploy-cilium-cluster-mesh:
bash app/deploy-cilium-cluster-mesh.sh
+
deploy-cilium-hubble-dist:
bash app/deploy-cilium-hubble-dist.sh
+
deploy-cilium-hubble-local:
bash app/deploy-cilium-hubble-local.sh
+
deploy-cilium:
bash app/deploy-cilium.sh
+
deploy-kubeflow:
bash app/deploy-kubeflow.sh
+
deploy-openesb:
bash app/deploy-openesb.sh
+
deploy-weavescope:
bash app/deploy-weavescope.sh
+
deploy-istio:
bash app/deploy-istio.sh
+
deploy-dashboard:
bash app/deploy-dashboard.sh
+
deploy-dashboard-helm:
bash app/deploy-dashboard-helm.sh
+
push-image:
docker push $(IMAGE)
.PHONY: deploy-kind deploy-openesb deploy-dashboard deploy-dashboard-helm deploy-istio push-image
diff --git a/app/counter.yaml b/app/counter.yaml
new file mode 100644
index 0000000..cbcc8ef
--- /dev/null
+++ b/app/counter.yaml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: counter
+spec:
+ containers:
+ - name: count
+ image: busybox
+ args: [/bin/sh, -c, 'i=0; while true; do echo "This is demo log $i: $(date)"; i=$((i+1)); sleep 1; done']
\ No newline at end of file
diff --git a/app/deploy-chaosmesh.sh b/app/deploy-chaosmesh.sh
new file mode 100644
index 0000000..2d173a6
--- /dev/null
+++ b/app/deploy-chaosmesh.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+# https://chaos-mesh.org/docs/installation/get_started_on_kind/
+echo "===============================Install Chaos Mesh==========================================================="
+
+/bin/sh -c 'curl -sSL https://raw.githubusercontent.com/chaos-mesh/chaos-mesh/master/install.sh | bash -s -- --local kind'
+# curl -sSL https://raw.githubusercontent.com/chaos-mesh/chaos-mesh/master/install.sh | bash -s -- --local kind
+
+
+# Check existing services and pods before verifying the installation
+kubectl get service --all-namespaces #list all services in all namespaces
+kubectl get services
+kubectl get pods
+
+for i in {1..60}; do # Timeout after 2 minutes (60 x 2 = 120 secs)
+ if kubectl get pods --namespace=chaos-testing |grep Running ; then
+ break
+ fi
+ sleep 2
+done
+
+kubectl get service --all-namespaces #list all services in all namespace
+# Verify your installation
+kubectl get pod -n chaos-testing
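+
+# Hedged sketch (not verbatim from the upstream docs): the installer normally creates a
+# chaos-dashboard Service in the chaos-testing namespace. If it is present, port-forward it
+# so the dashboard can be reached locally; the service name and port 2333 are assumptions.
+if kubectl get svc chaos-dashboard -n chaos-testing ; then
+  kubectl port-forward -n chaos-testing svc/chaos-dashboard 2333:2333 &
+  sleep 5
+  curl -sS http://localhost:2333/ || true
+fi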
diff --git a/app/deploy-cilium.sh b/app/deploy-cilium.sh
index 618c802..1026683 100644
--- a/app/deploy-cilium.sh
+++ b/app/deploy-cilium.sh
@@ -24,7 +24,7 @@ helm install cilium ./cilium \
--set global.pullPolicy=IfNotPresent \
--set config.ipam=kubernetes
-echo echo "Waiting for cilium to be ready ..."
+echo "Waiting for cilium to be ready ..."
for i in {1..60}; do # Timeout after 3 minutes, 60x5=300 secs
if kubectl get pods --namespace=kube-system | grep ContainerCreating ; then
sleep 5
diff --git a/app/deploy-efk.sh b/app/deploy-efk.sh
new file mode 100644
index 0000000..8a808b4
--- /dev/null
+++ b/app/deploy-efk.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+echo "=============================EFK Elastic Fluentd Kibana============================================================="
+
+helm install elasticsearch stable/elasticsearch
+sleep 10
+
+kubectl apply -f app/fluentd-daemonset-elasticsearch.yaml
+
+helm install kibana stable/kibana -f app/kibana-values.yaml
+
+kubectl apply -f app/counter.yaml
+
+# curl kibana dashboard
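+
+# Hedged sketch of the "curl kibana dashboard" step above: the pod label selector below is an
+# assumption based on the stable/kibana chart defaults and the release name "kibana"; adjust it
+# (and the port) to whatever app/kibana-values.yaml actually configures.
+export KIBANA_POD=$(kubectl get pods -l "app=kibana,release=kibana" -o jsonpath="{.items[0].metadata.name}")
+kubectl port-forward $KIBANA_POD 5601:5601 &
+sleep 5
+curl -sS http://localhost:5601/ || true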
diff --git a/app/deploy-elk.sh b/app/deploy-elk.sh
new file mode 100644
index 0000000..4bb3216
--- /dev/null
+++ b/app/deploy-elk.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+echo "=============================ELK Elastic Kibana Logstash============================================================="
+
+kubectl create namespace elk
+kubectl apply --namespace=elk -f - <<"EOF"
+apiVersion: v1
+kind: LimitRange
+metadata:
+ name: mem-limit-range
+spec:
+ limits:
+ - default:
+ memory: 2000Mi
+ cpu: 2000m
+ defaultRequest:
+ memory: 1000Mi
+ cpu: 1000m
+ type: Container
+EOF
+echo "resource quotas applied to the namespace"
+
+helm install --name elastic-stack --namespace=elk stable/elastic-stack -f my-elastic-stack.yaml
+sleep 150
+kubectl get pods -n elk -l "release=elastic-stack"
+
+helm install --name kube-state-metrics --namespace=elk stable/kube-state-metrics
+helm install --name elastic-metricbeat --namespace=elk stable/metricbeat -f my-elastic-metricbeat.yaml # metricbeat dashboard
+kubectl --namespace=elk get pods -l "app=metricbeat,release=elastic-metricbeat"
+
+export POD_NAME=$(kubectl get pods -n elk -l "app=kibana,release=elastic-stack" -o jsonpath="{.items[0].metadata.name}");
+kubectl port-forward -n elk $POD_NAME 5601:5601 & # run in the background so the curl below can execute
+sleep 5 # give the port-forward a moment to establish
+
+# view dashboard
+curl http://localhost:5601/
\ No newline at end of file
diff --git a/app/deploy-istio-kind.sh b/app/deploy-istio-kind.sh
new file mode 100644
index 0000000..e9ea597
--- /dev/null
+++ b/app/deploy-istio-kind.sh
@@ -0,0 +1,129 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+#https://kind.sigs.k8s.io/docs/user/quick-start/
+#https://istio.io/docs/setup/platform-setup/kind/
+echo "=============================kind istio============================================================="
+docker version
+curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64
+chmod +x ./kind
+sudo mv ./kind /usr/local/bin/kind
+kind get clusters #see the list of kind clusters
+kind create cluster --name istio-testing #Create a cluster (without --name, kind defaults the cluster name to "kind")
+kind get clusters
+# - sudo snap install kubectl --classic
+kubectl config get-contexts #list the local Kubernetes contexts
+kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl
+
+#https://istio.io/latest/docs/setup/getting-started/
+echo "===============================Install istio==========================================================="
+#Download Istio
+#/bin/sh -c 'curl -L https://istio.io/downloadIstio | sh -' #download and extract the latest release automatically (Linux or macOS)
+export ISTIORELEASE="1.6"
+export ISTIOVERSION="1.6.4"
+/bin/sh -c 'curl -L https://istio.io/downloadIstio | ISTIO_VERSION=$ISTIOVERSION sh -' #download a specific version
+
+cd istio-* #Move to the Istio package directory. For example, if the package is istio-1.6.0
+export PATH=$PWD/bin:$PATH #Add the istioctl client to your path, The istioctl client binary in the bin/ directory.
+#precheck inspects a Kubernetes cluster for Istio install requirements
+istioctl experimental precheck #https://istio.io/docs/reference/commands/istioctl/#istioctl-experimental-precheck
+istioctl version
+istioctl manifest apply --set profile=demo #Install Istio, use the demo configuration profile
+kubectl label namespace default istio-injection=enabled #Add a namespace label to instruct Istio to automatically inject Envoy sidecar proxies when you deploy your application later
+
+#Deploy the sample application
+kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml #Deploy the Bookinfo sample application:
+kubectl get service --all-namespaces #list all services in all namespace
+kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it.
+kubectl get pods
+for i in {1..60}; do # Timeout after 2 minutes (60 x 2 = 120 secs)
+ if kubectl get pods --namespace=istio-system |grep Running ; then
+ break
+ fi
+ sleep 2
+done
+kubectl get service --all-namespaces #list all services in all namespace
+
+# see if the app is running inside the cluster and serving HTML pages by checking for the page title in the response
+#error: unable to upgrade connection: container not found ("ratings")
+#kubectl exec $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o "<title>.*</title>"
+#interactive shell
+#kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*"
+# - |
+# kubectl exec -it $(kubectl get pod \
+# -l app=ratings \
+# -o jsonpath='{.items[0].metadata.name}') \
+# -c ratings \
+# -- curl productpage:9080/productpage | grep -o ".*" Simple Bookstore App
+
+
+#Open the application to outside traffic
+#The Bookinfo application is deployed but not accessible from the outside. To make it accessible, you need to create an Istio Ingress Gateway, which maps a path to a route at the edge of your mesh.
+kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml #Associate this application with the Istio gateway
+istioctl analyze #Ensure that there are no issues with the configuration
+
+#Other platforms
+#Determining the ingress IP and ports
+#If the EXTERNAL-IP value is set, your environment has an external load balancer that you can use for the ingress gateway.
+#If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway.
+#access the gateway using the service’s node port.
+kubectl get svc istio-ingressgateway -n istio-system #determine if your Kubernetes cluster is running in an environment that supports external load balancers
+
+#external load balancer
+# #Follow these instructions if you have determined that your environment has an external load balancer.
+# # If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway,access the gateway using the service’s node port.
+# - export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+# - export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
+# - export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
+
+# #In certain environments, the load balancer may be exposed using a host name, instead of an IP address.
+# #the ingress gateway’s EXTERNAL-IP value will not be an IP address, but rather a host name
+
+#failed to set the INGRESS_HOST environment variable, correct the INGRESS_HOST value
+ export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+
+#Follow these instructions if your environment does not have an external load balancer and choose a node port instead
+#Set the ingress ports:
+export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}') #Set the ingress ports
+export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}') #Set the ingress ports
+
+#INGRESS_HOST: unbound variable
+export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT #Set GATEWAY_URL
+# echo $GATEWAY_URL #Ensure an IP address and port were successfully assigned to the environment variable
+# echo http://$GATEWAY_URL/productpage #Verify external access,retrieve the external address of the Bookinfo application
+# echo $(curl http://$GATEWAY_URL/productpage)
+
+#View the dashboard
+#istioctl dashboard kiali #optional dashboards installed by the demo installation,Access the Kiali dashboard. The default user name is admin and default password is admin
+#istioctl dashboard kiali # interactive shell
+
+
+#Uninstall
+#Cleanup #https://istio.io/latest/docs/examples/bookinfo/#cleanup
+#Delete the routing rules and terminate the application pods
+#samples/bookinfo/platform/kube/cleanup.sh
+# export ISTIORELEASE="1.6"
+# export NAMESPACE="default" #error: the path "/home/travis/build/githubfoam/kind-travisci/istio-1.6.4/bookinfo.yaml" does not exist
+# export NAMESPACE="istio-system" #error: the path "/home/travis/build/githubfoam/kind-travisci/istio-1.6.4/bookinfo.yaml" does not exist
+# /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/istio/istio/release-$ISTIORELEASE/samples/bookinfo/platform/kube/cleanup.sh)"
+#bash app/cleanup.sh #bash: app/cleanup.sh: No such file or directory
+# /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
+
+#Confirm shutdown
+# kubectl get virtualservices --namespace=istio-system #-- there should be no virtual services
+# kubectl get destinationrules --namespace=istio-system #-- there should be no destination rules
+# kubectl get gateway --namespace=istio-system #-- there should be no gateway
+# kubectl get pods --namespace=istio-system #-- the Bookinfo pods should be deleted
+
+
+# #The Istio uninstall deletes the RBAC permissions and all resources hierarchically under the istio-system namespace
+# #It is safe to ignore errors for non-existent resources because they may have been deleted hierarchically.
+# /bin/sh -eu -xv -c 'istioctl manifest generate --set profile=demo | kubectl delete -f -'
+
+#The istio-system namespace is not removed by default.
+#If no longer needed, use the following command to remove it
+ # kubectl delete namespace istio-system
diff --git a/app/deploy-istio.sh b/app/deploy-istio.sh
index e9ea597..035d0b7 100644
--- a/app/deploy-istio.sh
+++ b/app/deploy-istio.sh
@@ -7,17 +7,17 @@ set -o xtrace
#https://kind.sigs.k8s.io/docs/user/quick-start/
#https://istio.io/docs/setup/platform-setup/kind/
-echo "=============================kind istio============================================================="
-docker version
-curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64
-chmod +x ./kind
-sudo mv ./kind /usr/local/bin/kind
-kind get clusters #see the list of kind clusters
-kind create cluster --name istio-testing #Create a cluster,By default, the cluster will be given the name kind
-kind get clusters
-# - sudo snap install kubectl --classic
-kubectl config get-contexts #list the local Kubernetes contexts
-kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl
+# echo "=============================kind istio============================================================="
+# docker version
+# curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64
+# chmod +x ./kind
+# sudo mv ./kind /usr/local/bin/kind
+# kind get clusters #see the list of kind clusters
+# kind create cluster --name istio-testing #Create a cluster,By default, the cluster will be given the name kind
+# kind get clusters
+# # - sudo snap install kubectl --classic
+# kubectl config get-contexts #list the local Kubernetes contexts
+# kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl
#https://istio.io/latest/docs/setup/getting-started/
echo "===============================Install istio==========================================================="
diff --git a/app/deploy-kind-gardener.sh b/app/deploy-kind-gardener.sh
new file mode 100644
index 0000000..f63d83f
--- /dev/null
+++ b/app/deploy-kind-gardener.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+
+#https://istio.io/docs/setup/platform-setup/gardener/
+#https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md
+echo "=============================kind istio============================================================="
+curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64
+chmod +x ./kind
+
+mv ./kind /usr/local/bin/kind
+kind get clusters #see the list of kind clusters
+kind create cluster --name istio-testing #Create a cluster (without --name, kind defaults the cluster name to "kind")
+kind get clusters
+
+snap install kubectl --classic
+
+kubectl config get-contexts #list the local Kubernetes contexts
+kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml #deploy Dashboard
+
+echo "===============================Waiting for Dashboard to be ready==========================================================="
+for i in {1..150}; do # Timeout after 5 minutes, 150x2=300 secs
+ if kubectl get pods --namespace=kubernetes-dashboard | grep Running ; then
+ break
+ fi
+ sleep 2
+done
+
+kubectl get pod -n kubernetes-dashboard #Verify that Dashboard is deployed and running
+kubectl create clusterrolebinding default-admin --clusterrole cluster-admin --serviceaccount=default:default #Create a ClusterRoleBinding to provide admin access to the newly created cluster
+
+#To login to Dashboard, you need a Bearer Token. Use the following command to store the token in a variable
+token=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode)
+echo $token #Display the token using the echo command and copy it to use for logging into Dashboard.
+kubectl proxy & # Access Dashboard using the kubectl command-line tool by running the following command, Starting to serve on 127.0.0.1:8001
+
+for i in {1..60}; do # Timeout after 1 minute (60 x 1 = 60 secs)
+ if nc -z -v 127.0.0.1 8001 2>&1 | grep succeeded ; then
+ break
+ fi
+ sleep 1
+done
+
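+# Hedged sketch: once kubectl proxy answers on 8001, the Dashboard UI is normally reachable
+# through the standard proxy path below (per the kubernetes/dashboard docs); this only checks
+# that the endpoint responds, it does not log in with the token printed above.
+curl -sS http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/ || true
+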
+# - kind delete cluster --name istio-testing #delete the existing cluster
diff --git a/app/deploy-kubesec.sh b/app/deploy-kubesec.sh
new file mode 100644
index 0000000..8ae87bb
--- /dev/null
+++ b/app/deploy-kubesec.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+
+# echo "=============================install go============================================================="
+# export GOVERSION="1.14.4"
+# curl -O https://dl.google.com/go/go$GOVERSION.linux-amd64.tar.gz
+# tar -vf go$GOVERSION.linux-amd64.tar.gz
+# sudo mv go /usr/local
+# stat /usr/local/go
+# mkdir ~/work
+# echo "export GOPATH=$HOME/work" >> ~/.profile
+# echo "export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin" >> ~/.profile
+# source ~/.profile
+# go version
+
+echo "=============================kubesec============================================================="
+#https://github.com/controlplaneio/kubesec
+# go get -u github.com/controlplaneio/kubesec/cmd/kubesec
+
+#Command line usage
+cat <<EOF > kubesec-test.yaml
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kubesec-demo
+spec:
+ containers:
+ - name: kubesec-demo
+ image: gcr.io/google-samples/node-hello:1.0
+ securityContext:
+ readOnlyRootFilesystem: true
+EOF
+kubesec scan kubesec-test.yaml
+
+#Docker usage
+docker run -i kubesec/kubesec:512c5e0 scan /dev/stdin < kubesec-test.yaml
+
+# Kubesec HTTP Server
+kubesec http 8080 &
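+
+# Hedged sketch of exercising the HTTP server started above: POST the manifest to the /scan
+# endpoint described in the kubesec README (the endpoint name is taken from that README; if it
+# differs in your kubesec version, adjust accordingly).
+sleep 2
+curl -sSX POST --data-binary @kubesec-test.yaml http://localhost:8080/scan || true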
diff --git a/app/deploy-microservices.sh b/app/deploy-microservices.sh
index eec5ade..6aee014 100644
--- a/app/deploy-microservices.sh
+++ b/app/deploy-microservices.sh
@@ -19,10 +19,19 @@ kind get clusters
# - sudo snap install kubectl --classic
kubectl config get-contexts #list the local Kubernetes contexts
kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl
+
+BASEDIR=`pwd` && echo $BASEDIR
+
+# Setup a Kubernetes Cluster
+
+# Create an environment variable to store the name of a namespace
export NAMESPACE=tutorial
+# Create the namespace
kubectl create namespace $NAMESPACE
#https://istio.io/latest/docs/setup/getting-started/
+# https://istio.io/latest/docs/examples/microservices-istio/setup-kubernetes-cluster/
+# Install Istio using the demo profile.
echo "===============================Install istio==========================================================="
#Download Istio
#/bin/sh -c 'curl -L https://istio.io/downloadIstio | sh -' #download and extract the latest release automatically (Linux or macOS)
@@ -35,76 +44,374 @@ export PATH=$PWD/bin:$PATH #Add the istioctl client to your path, The istioctl c
#precheck inspects a Kubernetes cluster for Istio install requirements
istioctl experimental precheck #https://istio.io/docs/reference/commands/istioctl/#istioctl-experimental-precheck
istioctl version
-istioctl manifest apply --set profile=demo #Install Istio, use the demo configuration profile
-kubectl label namespace default istio-injection=enabled #Add a namespace label to instruct Istio to automatically inject Envoy sidecar proxies when you deploy your application later
+#Install Istio, use the demo configuration profile
+istioctl manifest apply --set profile=demo
+
+#Add a namespace label to instruct Istio to automatically inject Envoy sidecar proxies when you deploy your application later
+kubectl label namespace default istio-injection=enabled
+
+# #Deploy the sample application
+# kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml #Deploy the Bookinfo sample application:
+# kubectl get service --all-namespaces #list all services in all namespace
+# kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it.
+# kubectl get pods
+# for i in {1..60}; do # Timeout after 5 minutes, 60x2=120 secs, 2 mins
+# if kubectl get pods --namespace=istio-system |grep Running ; then
+# break
+# fi
+# sleep 2
+# done
+# kubectl get service --all-namespaces #list all services in all namespace
+#
+# # see if the app is running inside the cluster and serving HTML pages by checking for the page title in the response
+# #error: unable to upgrade connection: container not found ("ratings")
+# #kubectl exec $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*"
+# #interactive shell
+# #kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*"
+# # - |
+# # kubectl exec -it $(kubectl get pod \
+# # -l app=ratings \
+# # -o jsonpath='{.items[0].metadata.name}') \
+# # -c ratings \
+# # -- curl productpage:9080/productpage | grep -o ".*" Simple Bookstore App
+#
+#
+# #Open the application to outside traffic
+# #The Bookinfo application is deployed but not accessible from the outside. To make it accessible, you need to create an Istio Ingress Gateway, which maps a path to a route at the edge of your mesh.
+# kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml #Associate this application with the Istio gateway
+# istioctl analyze #Ensure that there are no issues with the configuration
+#
+# #Other platforms
+# #Determining the ingress IP and ports
+# #If the EXTERNAL-IP value is set, your environment has an external load balancer that you can use for the ingress gateway.
+# #If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway.
+# #access the gateway using the service’s node port.
+# kubectl get svc istio-ingressgateway -n istio-system #determine if your Kubernetes cluster is running in an environment that supports external load balancers
+#
+# #external load balancer
+# # #Follow these instructions if you have determined that your environment has an external load balancer.
+# # # If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway,access the gateway using the service’s node port.
+# # - export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+# # - export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
+# # - export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
+#
+# # #In certain environments, the load balancer may be exposed using a host name, instead of an IP address.
+# # #the ingress gateway’s EXTERNAL-IP value will not be an IP address, but rather a host name
+#
+# #failed to set the INGRESS_HOST environment variable, correct the INGRESS_HOST value
+# export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
+#
+# #Follow these instructions if your environment does not have an external load balancer and choose a node port instead
+# #Set the ingress ports:
+# export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}') #Set the ingress ports
+# export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}') #Set the ingress ports
+#
+# #INGRESS_HOST: unbound variable
+# export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT #Set GATEWAY_URL
+# # echo $GATEWAY_URL #Ensure an IP address and port were successfully assigned to the environment variable
+# # echo http://$GATEWAY_URL/productpage #Verify external access,retrieve the external address of the Bookinfo application
+# # echo $(curl http://$GATEWAY_URL/productpage)
+#
+# #View the dashboard
+# #istioctl dashboard kiali #optional dashboards installed by the demo installation,Access the Kiali dashboard. The default user name is admin and default password is admin
+# #istioctl dashboard kiali # interactive shell
+#
+# #Enable Envoy’s access logging.
+# #https://istio.io/latest/docs/tasks/observability/logs/access-log/#before-you-begin
+# #Deploy the sleep sample app to use as a test source for sending requests.
+# kubectl apply -f samples/sleep/sleep.yaml
+
+
+# enable Envoy’s access logging
+# Skip the clean up and delete steps, because you need the sleep application
+# https://istio.io/latest/docs/examples/microservices-istio/setup-kubernetes-cluster/
+# https://istio.io/latest/docs/tasks/observability/logs/access-log/#before-you-begin
+echo "===============================Enable Envoy’s access logging.==========================================================="
+# Deploy the sleep sample app to use as a test source for sending requests.
+# If you have automatic sidecar injection enabled, run the following command to deploy the sample app
+
+# Otherwise, manually inject the sidecar before deploying the sleep application
+# kubectl apply -f <(istioctl kube-inject -f samples/sleep/sleep.yaml)
-#Deploy the sample application
-kubectl apply -f samples/bookinfo/platform/kube/bookinfo.yaml #Deploy the Bookinfo sample application:
-kubectl get service --all-namespaces #list all services in all namespace
-kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it.
-kubectl get pods
-for i in {1..60}; do # Timeout after 5 minutes, 60x2=120 secs, 2 mins
- if kubectl get pods --namespace=istio-system |grep Running ; then
- break
- fi
- sleep 2
-done
-kubectl get service --all-namespaces #list all services in all namespace
-
-# see if the app is running inside the cluster and serving HTML pages by checking for the page title in the response
-#error: unable to upgrade connection: container not found ("ratings")
-#kubectl exec $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*"
-#interactive shell
-#kubectl exec -it $(kubectl get pod -l app=ratings -o jsonpath='{.items[0].metadata.name}') -c ratings -- curl productpage:9080/productpage | grep -o ".*"
-# - |
-# kubectl exec -it $(kubectl get pod \
-# -l app=ratings \
-# -o jsonpath='{.items[0].metadata.name}') \
-# -c ratings \
-# -- curl productpage:9080/productpage | grep -o ".*" Simple Bookstore App
-
-
-#Open the application to outside traffic
-#The Bookinfo application is deployed but not accessible from the outside. To make it accessible, you need to create an Istio Ingress Gateway, which maps a path to a route at the edge of your mesh.
-kubectl apply -f samples/bookinfo/networking/bookinfo-gateway.yaml #Associate this application with the Istio gateway
-istioctl analyze #Ensure that there are no issues with the configuration
-
-#Other platforms
-#Determining the ingress IP and ports
-#If the EXTERNAL-IP value is set, your environment has an external load balancer that you can use for the ingress gateway.
-#If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway.
-#access the gateway using the service’s node port.
-kubectl get svc istio-ingressgateway -n istio-system #determine if your Kubernetes cluster is running in an environment that supports external load balancers
-
-#external load balancer
-# #Follow these instructions if you have determined that your environment has an external load balancer.
-# # If the EXTERNAL-IP value is (or perpetually ), your environment does not provide an external load balancer for the ingress gateway,access the gateway using the service’s node port.
-# - export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
-# - export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
-# - export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
-
-# #In certain environments, the load balancer may be exposed using a host name, instead of an IP address.
-# #the ingress gateway’s EXTERNAL-IP value will not be an IP address, but rather a host name
-
-#failed to set the INGRESS_HOST environment variable, correct the INGRESS_HOST value
- export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
-
-#Follow these instructions if your environment does not have an external load balancer and choose a node port instead
-#Set the ingress ports:
-export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}') #Set the ingress ports
-export SECURE_INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="https")].nodePort}') #Set the ingress ports
-
-#INGRESS_HOST: unbound variable
-export GATEWAY_URL=$INGRESS_HOST:$INGRESS_PORT #Set GATEWAY_URL
-# echo $GATEWAY_URL #Ensure an IP address and port were successfully assigned to the environment variable
-# echo http://$GATEWAY_URL/productpage #Verify external access,retrieve the external address of the Bookinfo application
-# echo $(curl http://$GATEWAY_URL/productpage)
-
-#View the dashboard
-#istioctl dashboard kiali #optional dashboards installed by the demo installation,Access the Kiali dashboard. The default user name is admin and default password is admin
-#istioctl dashboard kiali # interactive shell
-
-#Enable Envoy’s access logging.
-#https://istio.io/latest/docs/tasks/observability/logs/access-log/#before-you-begin
-#Deploy the sleep sample app to use as a test source for sending requests.
kubectl apply -f samples/sleep/sleep.yaml
+# kubectl apply -f https://github.com/istio/istio/tree/release-1.6/samples/sleep
+
+# Set the SOURCE_POD environment variable to the name of your source pod:
+export SOURCE_POD=$(kubectl get pod -l app=sleep -o jsonpath={.items..metadata.name})
+
+# Start the httpbin sample.
+# If you have enabled automatic sidecar injection, deploy the httpbin service
+
+# Otherwise, you have to manually inject the sidecar before deploying the httpbin application
+# kubectl apply -f <(istioctl kube-inject -f samples/httpbin/httpbin.yaml)
+
+kubectl apply -f samples/httpbin/httpbin.yaml
+
+
+# Enable Envoy’s access logging
+# Install Istio using the demo profile.
+# replace demo with the name of the profile you used when you installed Istio
+istioctl install --set profile=demo --set meshConfig.accessLogFile="/dev/stdout"
+
+# Test the access log
+# connect to 10.110.95.100 port 8000 failed: Connection refused
+# kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl -v httpbin:8000/status/418
+
+kubectl get pods --all-namespaces
+echo echo "Waiting for sleep and httpbin to be ready ..."
+for i in {1..150}; do # Timeout after 5 minutes, 60x5=300 secs
+ if kubectl get pods --namespace=default | grep PodInitializing ; then
+ sleep 10
+ else
+ break
+ fi
+done
+echo echo "Waiting for kubernetes be ready ..."
+for i in {1..150}; do # Timeout after 5 minutes, 60x5=300 secs
+ if kubectl get pods --namespace=istio-system | grep ContainerCreating ; then
+ sleep 10
+ else
+ break
+ fi
+done
+kubectl get pods --all-namespaces
+
+# Check sleep’s log
+kubectl logs -l app=sleep -c istio-proxy
+# Check httpbin’s log
+kubectl logs -l app=httpbin -c istio-proxy #2020-07-08T18:15:02.910663Z info Envoy proxy is ready
+
+
+# https://istio.io/latest/docs/examples/microservices-istio/setup-kubernetes-cluster/
+# Create Kubernetes Ingress resources for these common Istio services using kubectl:
+# Grafana
+# Jaeger
+# Prometheus
+# Kiali
+# The kubectl command can accept an in-line configuration to create the Ingress resources for each service
+# kubectl apply -f - <<EOF ... EOF   (in-line manifest; see the commented sketch below)
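+# A minimal sketch of such an in-line manifest, kept commented out; the add-on service names and
+# ports (grafana:3000, kiali:20001 in istio-system) and the nginx ingress class are assumptions:
+# kubectl apply -f - <<EOF
+# apiVersion: networking.k8s.io/v1beta1
+# kind: Ingress
+# metadata:
+#   name: istio-addons
+#   namespace: istio-system
+#   annotations:
+#     kubernetes.io/ingress.class: nginx
+# spec:
+#   rules:
+#   - http:
+#       paths:
+#       - path: /grafana
+#         backend:
+#           serviceName: grafana
+#           servicePort: 3000
+#       - path: /kiali
+#         backend:
+#           serviceName: kiali
+#           servicePort: 20001
+# EOF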
+
+
+
+# Run Bookinfo with Kubernetes
+# how to deploy the whole application to a Kubernetes cluster.
+# https://istio.io/latest/docs/examples/microservices-istio/bookinfo-kubernetes/
+
+
+# Deploy the application and a testing pod
+# Set the MYHOST environment variable to hold the URL of the application
+export MYHOST=$(kubectl config view -o jsonpath={.contexts..namespace}).bookinfo.com
+
+# Deploy the application to your Kubernetes cluster
+kubectl apply -l version!=v2,version!=v3 -f https://raw.githubusercontent.com/istio/istio/release-1.6/samples/bookinfo/platform/kube/bookinfo.yaml
+
+# Check the status of the pods
+kubectl get pods
+
+
+# After the four services reach the Running status, you can scale the deployments
+# so that each version of each microservice runs in three pods
+kubectl scale deployments --all --replicas 3
+
+# Check the status of the pods
+kubectl get pods
+
+# After the services achieve the Running status, deploy a testing pod, sleep, to use for sending requests to your microservices
+kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.6/samples/sleep/sleep.yaml
+
+# To confirm that the Bookinfo application is running, send a request to it with curl from your testing pod:
+# interactive shell
+# kubectl exec -it $(kubectl get pod -l app=sleep -o jsonpath='{.items[0].metadata.name}') -c sleep -- curl productpage:9080/productpage | grep -o ".*"
+
+
+# Enable external access to the application
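+# One assumed way to do this is a Kubernetes Ingress that routes the $MYHOST host name set above
+# to productpage:9080 (commented out here as a sketch, not necessarily the tutorial's exact manifest):
+# kubectl apply -f - <<EOF
+# apiVersion: networking.k8s.io/v1beta1
+# kind: Ingress
+# metadata:
+#   name: bookinfo
+# spec:
+#   rules:
+#   - host: $MYHOST
+#     http:
+#       paths:
+#       - path: /productpage
+#         backend:
+#           serviceName: productpage
+#           servicePort: 9080
+#       - path: /static
+#         backend:
+#           serviceName: productpage
+#           servicePort: 9080
+# EOF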
diff --git a/app/deploy-openfaas.sh b/app/deploy-openfaas.sh
new file mode 100644
index 0000000..d55faad
--- /dev/null
+++ b/app/deploy-openfaas.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+set -eox pipefail #safety for script
+
+# https://itnext.io/deploy-your-first-serverless-function-to-kubernetes-232307f7b0a9
+echo "============================OpenFaaS =============================================================="
+# `curl -sSLf https://cli.openfaas.com | sh` #install the OpenFaaS CLI
+# `curl -sSLf https://dl.get-arkade.dev | sh` #install arkade
+
+/bin/sh -c 'curl -sSLf https://cli.openfaas.com | sh' ##install the OpenFaaS CLI
+/bin/sh -c 'curl -sSLf https://dl.get-arkade.dev | sh' ##install arkade
+
+arkade install openfaas #use arkade to install OpenFaaS
+arkade info openfaas
+
+# Forward the gateway to your machine
+kubectl rollout status -n openfaas deploy/gateway
+kubectl port-forward -n openfaas svc/gateway 8080:8080 &
+
+# Now log in using the CLI
+# PASSWORD=$(kubectl get secret -n openfaas basic-auth -o jsonpath="{.data.basic-auth-password}" | base64 --decode; echo)
+# interactive shell
+# echo -n $PASSWORD | faas-cli login --username admin --password-stdin
+
+# faas-cli store deploy nodeinfo
+# Check to see "Ready" status
+# faas-cli describe nodeinfo
+# Invoke the function
+# echo | faas-cli invoke nodeinfo
+# echo | faas-cli invoke nodeinfo --async
+
+# curl http://localhost:8080
+# Get the password so you can open the UI
+# echo $PASSWORD
+
+# faas-cli template store list
+# Sign up for a Docker Hub account, so that you can store your functions for free.
+# export OPENFAAS_PREFIX="DOCKER_HUB_USERNAME"
+# export OPENFAAS_PREFIX=$DOCKER_USERNAME #travisci env var
+# faas-cli new --lang python3 serverless
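+# After editing the generated handler, the usual next step (assumed here, not part of this script) is:
+# faas-cli up -f serverless.yml   # build, push and deploy the function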
diff --git a/app/deploy-voting-app.sh b/app/deploy-voting-app.sh
new file mode 100644
index 0000000..1cc0313
--- /dev/null
+++ b/app/deploy-voting-app.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+echo "===============================deploy voting app==========================================================="
+
+cd app/voting
+kubectl create namespace vote
+kubectl create -f deployments/
+kubectl create -f services/
+# minikube addons enable ingress
+kubectl apply -f demo-ingress.yaml
+kubectl --namespace=vote get ingress
+
+# Add an entry for the ingress host to the bottom of the /etc/hosts file, in the form:
+# <INGRESS_IP>  demo-kubernetes.info
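+# One way to do that, assuming minikube provides the ingress IP:
+# echo "$(minikube ip) demo-kubernetes.info" | sudo tee -a /etc/hosts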
+
+
+
+#Deploy the sample application
+kubectl get service --all-namespaces #list all services in all namespace
+kubectl get services #The application will start. As each pod becomes ready, the Istio sidecar will deploy along with it.
+kubectl get pods
+
+for i in {1..60}; do # Timeout after 5 minutes, 60x5=300 secs
+ if kubectl get pods --namespace=vote |grep ContainerCreating ; then
+ sleep 5
+ else
+ break
+ fi
+done
+
+kubectl get service --all-namespaces #list all services in all namespace
+# Verify your installation
+kubectl get pod -n vote
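+# Optionally hit the vote UI through its NodePort (31000 per services/vote-service.yaml; assumes a minikube node):
+# curl -s http://$(minikube ip):31000 | head -n 5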
+
+kubectl delete namespace vote
\ No newline at end of file
diff --git a/app/fluentd-daemonset-elasticsearch.yaml b/app/fluentd-daemonset-elasticsearch.yaml
new file mode 100644
index 0000000..12873eb
--- /dev/null
+++ b/app/fluentd-daemonset-elasticsearch.yaml
@@ -0,0 +1,73 @@
+# src: https://github.com/fluent/fluentd-kubernetes-daemonset/blob/master/fluentd-daemonset-elasticsearch.yaml
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+ name: fluentd
+ # namespace: kube-system
+ labels:
+ k8s-app: fluentd-logging
+ version: v1
+spec:
+ selector:
+ matchLabels:
+ k8s-app: fluentd-logging
+ version: v1
+ template:
+ metadata:
+ labels:
+ k8s-app: fluentd-logging
+ version: v1
+ spec:
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ effect: NoSchedule
+ containers:
+ - name: fluentd
+ image: fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch
+ env:
+ - name: FLUENT_ELASTICSEARCH_HOST
+ value: "elasticsearch-client"
+ - name: FLUENT_ELASTICSEARCH_PORT
+ value: "9200"
+ - name: FLUENT_ELASTICSEARCH_SCHEME
+ value: "http"
+ # Option to configure elasticsearch plugin with self signed certs
+ # ================================================================
+ - name: FLUENT_ELASTICSEARCH_SSL_VERIFY
+ value: "false" # changed by me
+ # Option to configure elasticsearch plugin with tls
+ # ================================================================
+ - name: FLUENT_ELASTICSEARCH_SSL_VERSION
+ value: "TLSv1_2"
+ # X-Pack Authentication
+ # =====================
+ - name: FLUENT_ELASTICSEARCH_USER
+ value: "elastic"
+ - name: FLUENT_ELASTICSEARCH_PASSWORD
+ value: "changeme"
+ # Logz.io Authentication
+ # ======================
+ - name: LOGZIO_TOKEN
+ value: "ThisIsASuperLongToken"
+ - name: LOGZIO_LOGTYPE
+ value: "kubernetes"
+ resources:
+ limits:
+ memory: 200Mi
+ requests:
+ cpu: 100m
+ memory: 200Mi
+ volumeMounts:
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ terminationGracePeriodSeconds: 30
+ volumes:
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
\ No newline at end of file
diff --git a/app/kibana-values.yaml b/app/kibana-values.yaml
new file mode 100644
index 0000000..aaf6e48
--- /dev/null
+++ b/app/kibana-values.yaml
@@ -0,0 +1,10 @@
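+# Values override for the Kibana Helm chart; one assumed usage with Helm 2:
+#   helm install --name kibana stable/kibana -f app/kibana-values.yaml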
+files:
+ kibana.yml:
+ ## Default Kibana configuration from kibana-docker.
+ server.name: kibana
+ server.host: "0"
+ ## For kibana < 6.6, use elasticsearch.url instead
+ elasticsearch.hosts: http://elasticsearch-client:9200
+
+service:
+ type: LoadBalancer # ClusterIP
\ No newline at end of file
diff --git a/app/kind_gardener.sh b/app/kind_gardener.sh
new file mode 100644
index 0000000..4695f9d
--- /dev/null
+++ b/app/kind_gardener.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+
+#https://istio.io/docs/setup/platform-setup/gardener/
+#https://github.com/gardener/gardener/blob/master/docs/development/local_setup.md
+curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.8.1/kind-$(uname)-amd64
+chmod +x ./kind
+mv ./kind /usr/local/bin/kind
+kind get clusters #see the list of kind clusters
+kind create cluster --name istio-testing #Create a cluster; by default it would be named "kind"
+kind get clusters
+snap install kubectl --classic
+kubectl config get-contexts #list the local Kubernetes contexts
+kubectl config use-context kind-istio-testing #run following command to set the current context for kubectl
+kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta8/aio/deploy/recommended.yaml #deploy Dashboard
+echo "===============================Waiting for Dashboard to be ready==========================================================="
+ for i in {1..150}; do # Timeout after 5 minutes, 150x2=300 secs
+ if kubectl get pods --namespace=kubernetes-dashboard | grep Running ; then
+ break
+ fi
+ sleep 2
+ done
+kubectl get pod -n kubernetes-dashboard #Verify that Dashboard is deployed and running
+kubectl create clusterrolebinding default-admin --clusterrole cluster-admin --serviceaccount=default:default #Create a ClusterRoleBinding to provide admin access to the newly created cluster
+#To log in to Dashboard, you need a Bearer Token. Use the following command to store the token in a variable
+token=$(kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='default')].data.token}"|base64 --decode)
+echo $token #Display the token using the echo command and copy it to use for logging into Dashboard.
+kubectl proxy & # Access Dashboard using the kubectl command-line tool by running the following command, Starting to serve on 127.0.0.1:8001
+ for i in {1..60}; do # Timeout after 1 minute, 60x1=60 secs
+ if nc -z -v 127.0.0.1 8001 2>&1 | grep succeeded ; then
+ break
+ fi
+ sleep 1
+ done
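+# With the proxy running, the Dashboard should be reachable at the standard proxy URL (paste the token printed above):
+# http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/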
+ # - kind delete cluster --name istio-testing #delete the existing cluster
diff --git a/app/provision-helm.sh b/app/provision-helm.sh
new file mode 100644
index 0000000..b74c84c
--- /dev/null
+++ b/app/provision-helm.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+
+echo fetch and install helm...
+HELM_ARCHIVE=helm-v2.12.1-linux-amd64.tar.gz
+HELM_DIR=linux-amd64
+HELM_BIN=$HELM_DIR/helm
+curl -LsO https://storage.googleapis.com/kubernetes-helm/$HELM_ARCHIVE && tar -zxvf $HELM_ARCHIVE && chmod +x $HELM_BIN && cp $HELM_BIN /usr/local/bin
+rm $HELM_ARCHIVE
+rm -rf $HELM_DIR
+
+helm version
+
+echo setup tiller account...
+kubectl -n kube-system create sa tiller && kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
+
+echo initialize tiller...
+helm init --wait --skip-refresh --upgrade --service-account tiller
+echo tiller initialized
\ No newline at end of file
diff --git a/app/provision-kubectl.sh b/app/provision-kubectl.sh
new file mode 100644
index 0000000..e4f2c21
--- /dev/null
+++ b/app/provision-kubectl.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+set -o errexit
+set -o pipefail
+set -o nounset
+set -o xtrace
+# set -eox pipefail #safety for script
+
+echo fetch and install kubectl...
+curl -LsO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/kubectl
+
+kubectl version --client
\ No newline at end of file
diff --git a/app/sample-elastic-metricbeat.yaml b/app/sample-elastic-metricbeat.yaml
new file mode 100644
index 0000000..22c5fd9
--- /dev/null
+++ b/app/sample-elastic-metricbeat.yaml
@@ -0,0 +1,30 @@
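+# Values override for the Metricbeat Helm chart; one assumed usage with Helm 2:
+#   helm install --name metricbeat stable/metricbeat -f app/sample-elastic-metricbeat.yaml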
+image:
+ repository: docker.elastic.co/beats/metricbeat-oss
+daemonset:
+ config:
+ output.file: false
+ output.elasticsearch:
+ hosts: ["elastic-stack-elasticsearch-client:9200"]
+ modules:
+ kubernetes:
+ config:
+ - module: kubernetes
+ metricsets:
+ - node
+ - system
+ - pod
+ - container
+ - volume
+ period: 10s
+ host: ${NODE_NAME}
+ hosts: ["https://${HOSTNAME}:10250"]
+ bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+ ssl.verification_mode: "none"
+deployment:
+ config:
+ output.file: false
+ output.elasticsearch:
+ hosts: ["elastic-stack-elasticsearch-client:9200"]
+ setup.kibana:
+ host: "elastic-stack-kibana:443"
+ setup.dashboards.enabled: true
\ No newline at end of file
diff --git a/app/sample-elastic-stack.yaml b/app/sample-elastic-stack.yaml
new file mode 100644
index 0000000..03502dc
--- /dev/null
+++ b/app/sample-elastic-stack.yaml
@@ -0,0 +1,23 @@
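+# Values override for the stable/elastic-stack umbrella chart (release name elastic-stack assumed,
+# matching the elastic-stack-* service names below); one assumed usage with Helm 2:
+#   helm install --name elastic-stack stable/elastic-stack -f app/sample-elastic-stack.yaml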
+logstash:
+ enabled: true
+ elasticsearch:
+ host: elastic-stack-elasticsearch-client
+
+filebeat:
+ enabled: true
+ config:
+ input:
+ type: container
+ paths:
+ - /var/log/containers/*.log
+ processors:
+ - add_kubernetes_metadata:
+ host: ${NODE_NAME}
+ matchers:
+ - logs_path:
+ logs_path: "/var/log/containers/"
+ output.file.enabled: false
+ output.logstash:
+ hosts: ["elastic-stack-logstash:5044"]
+ indexTemplateLoad:
+ - elastic-stack-elasticsearch-client:9200
\ No newline at end of file
diff --git a/app/voting/demo-ingress.yaml b/app/voting/demo-ingress.yaml
new file mode 100644
index 0000000..3c90565
--- /dev/null
+++ b/app/voting/demo-ingress.yaml
@@ -0,0 +1,28 @@
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: demo-vote-ingress
+ namespace: vote
+ annotations:
+ kubernetes.io/ingress.class: nginx
+ nginx.ingress.kubernetes.io/rewrite-target: /$1
+ nginx.ingress.kubernetes.io/configuration-snippet: |
+ location ~ \.css {
+ add_header Content-Type text/css;
+ }
+ location ~ \.js {
+ add_header Content-Type application/x-javascript;
+ }
+spec:
+ rules:
+ - host: demo-kubernetes.info
+ http:
+ paths:
+ - path: /vote/*
+ backend:
+ serviceName: vote
+ servicePort: 5000
+ - path: /static/*
+ backend:
+ serviceName: vote
+ servicePort: 5000
\ No newline at end of file
diff --git a/app/voting/deployments/db-deployment.yaml b/app/voting/deployments/db-deployment.yaml
new file mode 100644
index 0000000..7fb0e9b
--- /dev/null
+++ b/app/voting/deployments/db-deployment.yaml
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: db
+ name: db
+ namespace: vote
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: db
+ template:
+ metadata:
+ labels:
+ app: db
+ spec:
+ containers:
+ - image: postgres:9.4
+ name: postgres
+ env:
+ - name: POSTGRES_USER
+ value: postgres
+ - name: POSTGRES_PASSWORD
+ value: postgres
+ ports:
+ - containerPort: 5432
+ name: postgres
+ volumeMounts:
+ - mountPath: /var/lib/postgresql/data
+ name: db-data
+ volumes:
+ - name: db-data
+ emptyDir: {}
\ No newline at end of file
diff --git a/app/voting/deployments/nginx-deployment.yml b/app/voting/deployments/nginx-deployment.yml
new file mode 100644
index 0000000..e3b0a41
--- /dev/null
+++ b/app/voting/deployments/nginx-deployment.yml
@@ -0,0 +1,20 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: nginx
+ namespace: vote
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: nginx
+ template:
+ metadata:
+ labels:
+ app: nginx
+ spec:
+ containers:
+ - name: nginx
+ image: nginx:1.7.9
+ ports:
+ - containerPort: 80
\ No newline at end of file
diff --git a/app/voting/deployments/redis-deployment.yaml b/app/voting/deployments/redis-deployment.yaml
new file mode 100644
index 0000000..f2b695d
--- /dev/null
+++ b/app/voting/deployments/redis-deployment.yaml
@@ -0,0 +1,29 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: redis
+ name: redis
+ namespace: vote
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: redis
+ template:
+ metadata:
+ labels:
+ app: redis
+ spec:
+ containers:
+ - image: redis:alpine
+ name: redis
+ ports:
+ - containerPort: 6379
+ name: redis
+ volumeMounts:
+ - mountPath: /data
+ name: redis-data
+ volumes:
+ - name: redis-data
+ emptyDir: {}
\ No newline at end of file
diff --git a/app/voting/deployments/result-deployment.yaml b/app/voting/deployments/result-deployment.yaml
new file mode 100644
index 0000000..5f94703
--- /dev/null
+++ b/app/voting/deployments/result-deployment.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: result
+ name: result
+ namespace: vote
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: result
+ template:
+ metadata:
+ labels:
+ app: result
+ spec:
+ containers:
+ - image: dockersamples/examplevotingapp_result:before
+ name: result
+ ports:
+ - containerPort: 80
+ name: result
\ No newline at end of file
diff --git a/app/voting/deployments/vote-deployment.yaml b/app/voting/deployments/vote-deployment.yaml
new file mode 100644
index 0000000..fb97cb3
--- /dev/null
+++ b/app/voting/deployments/vote-deployment.yaml
@@ -0,0 +1,23 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: vote
+ name: vote
+ namespace: vote
+spec:
+ replicas: 2
+ selector:
+ matchLabels:
+ app: vote
+ template:
+ metadata:
+ labels:
+ app: vote
+ spec:
+ containers:
+ - image: dockersamples/examplevotingapp_vote
+ name: vote
+ ports:
+ - containerPort: 80
+ name: vote
\ No newline at end of file
diff --git a/app/voting/deployments/worker-deployment.yaml b/app/voting/deployments/worker-deployment.yaml
new file mode 100644
index 0000000..b785148
--- /dev/null
+++ b/app/voting/deployments/worker-deployment.yaml
@@ -0,0 +1,20 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ labels:
+ app: worker
+ name: worker
+ namespace: vote
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app: worker
+ template:
+ metadata:
+ labels:
+ app: worker
+ spec:
+ containers:
+ - image: dockersamples/examplevotingapp_worker
+ name: worker
\ No newline at end of file
diff --git a/app/voting/services/db-service.yaml b/app/voting/services/db-service.yaml
new file mode 100644
index 0000000..e12636f
--- /dev/null
+++ b/app/voting/services/db-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: db
+ name: db
+ namespace: vote
+spec:
+ type: ClusterIP
+ ports:
+ - name: "db-service"
+ port: 5432
+ targetPort: 5432
+ selector:
+ app: db
\ No newline at end of file
diff --git a/app/voting/services/redis-service.yaml b/app/voting/services/redis-service.yaml
new file mode 100644
index 0000000..99071ba
--- /dev/null
+++ b/app/voting/services/redis-service.yaml
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: redis
+ name: redis
+ namespace: vote
+spec:
+ type: ClusterIP
+ ports:
+ - name: "redis-service"
+ port: 6379
+ targetPort: 6379
+ selector:
+ app: redis
\ No newline at end of file
diff --git a/app/voting/services/result-service.yaml b/app/voting/services/result-service.yaml
new file mode 100644
index 0000000..6a5024e
--- /dev/null
+++ b/app/voting/services/result-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: result
+ name: result
+ namespace: vote
+spec:
+ type: NodePort
+ ports:
+ - name: "result-service"
+ port: 5001
+ targetPort: 80
+ nodePort: 31001
+ selector:
+ app: result
\ No newline at end of file
diff --git a/app/voting/services/vote-service.yaml b/app/voting/services/vote-service.yaml
new file mode 100644
index 0000000..5034a72
--- /dev/null
+++ b/app/voting/services/vote-service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+ labels:
+ app: vote
+ name: vote
+ namespace: vote
+spec:
+ type: NodePort
+ ports:
+ - name: "vote-service"
+ port: 5000
+ targetPort: 80
+ nodePort: 31000
+ selector:
+ app: vote
\ No newline at end of file
diff --git a/tutorial-user-config.yaml b/tutorial-user-config.yaml
new file mode 100644
index 0000000..6906254
--- /dev/null
+++ b/tutorial-user-config.yaml
@@ -0,0 +1,26 @@
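+# Kubeconfig template: ${NAMESPACE} and the embedded $(kubectl ...) command substitutions are
+# expanded by the shell (with NAMESPACE set) before this config is written out and used.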
+apiVersion: v1
+kind: Config
+preferences: {}
+
+clusters:
+- cluster:
+ certificate-authority-data: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath='{.data.ca\.crt}')
+ server: $(kubectl config view -o jsonpath="{.clusters[?(.name==\"$(kubectl config view -o jsonpath="{.contexts[?(.name==\"$(kubectl config current-context)\")].context.cluster}")\")].cluster.server}")
+ name: ${NAMESPACE}-cluster
+
+users:
+- name: ${NAMESPACE}-user
+ user:
+ as-user-extra: {}
+ client-key-data: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath='{.data.ca\.crt}')
+ token: $(kubectl get secret $(kubectl get sa ${NAMESPACE}-user -n $NAMESPACE -o jsonpath={.secrets..name}) -n $NAMESPACE -o jsonpath={.data.token} | base64 --decode)
+
+contexts:
+- context:
+ cluster: ${NAMESPACE}-cluster
+ namespace: ${NAMESPACE}
+ user: ${NAMESPACE}-user
+ name: ${NAMESPACE}
+
+current-context: ${NAMESPACE}
+