diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 3ce87f39d..3a63e98c6 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -36,4 +36,10 @@ python3.6 -m pip uninstall --yes --quiet nox-automation python3.6 -m pip install --upgrade --quiet nox python3.6 -m nox --version -python3.6 -m nox +# If NOX_SESSION is set, it only runs the specified session, +# otherwise run all the sessions. +if [[ -n "${NOX_SESSION:-}" ]]; then + python3.6 -m nox -s "${NOX_SESSION:-}" +else + python3.6 -m nox +fi diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile new file mode 100644 index 000000000..412b0b56a --- /dev/null +++ b/.kokoro/docker/docs/Dockerfile @@ -0,0 +1,98 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ubuntu:20.04 + +ENV DEBIAN_FRONTEND noninteractive + +# Ensure local Python is preferred over distribution Python. +ENV PATH /usr/local/bin:$PATH + +# Install dependencies. +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + apt-transport-https \ + build-essential \ + ca-certificates \ + curl \ + dirmngr \ + git \ + gpg-agent \ + graphviz \ + libbz2-dev \ + libdb5.3-dev \ + libexpat1-dev \ + libffi-dev \ + liblzma-dev \ + libreadline-dev \ + libsnappy-dev \ + libssl-dev \ + libsqlite3-dev \ + portaudio19-dev \ + redis-server \ + software-properties-common \ + ssh \ + sudo \ + tcl \ + tcl-dev \ + tk \ + tk-dev \ + uuid-dev \ + wget \ + zlib1g-dev \ + && add-apt-repository universe \ + && apt-get update \ + && apt-get -y install jq \ + && apt-get clean autoclean \ + && apt-get autoremove -y \ + && rm -rf /var/lib/apt/lists/* \ + && rm -f /var/cache/apt/archives/*.deb + + +COPY fetch_gpg_keys.sh /tmp +# Install the desired versions of Python. +RUN set -ex \ + && export GNUPGHOME="$(mktemp -d)" \ + && echo "disable-ipv6" >> "${GNUPGHOME}/dirmngr.conf" \ + && /tmp/fetch_gpg_keys.sh \ + && for PYTHON_VERSION in 3.7.8 3.8.5; do \ + wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz" \ + && wget --no-check-certificate -O python-${PYTHON_VERSION}.tar.xz.asc "https://www.python.org/ftp/python/${PYTHON_VERSION%%[a-z]*}/Python-$PYTHON_VERSION.tar.xz.asc" \ + && gpg --batch --verify python-${PYTHON_VERSION}.tar.xz.asc python-${PYTHON_VERSION}.tar.xz \ + && rm -r python-${PYTHON_VERSION}.tar.xz.asc \ + && mkdir -p /usr/src/python-${PYTHON_VERSION} \ + && tar -xJC /usr/src/python-${PYTHON_VERSION} --strip-components=1 -f python-${PYTHON_VERSION}.tar.xz \ + && rm python-${PYTHON_VERSION}.tar.xz \ + && cd /usr/src/python-${PYTHON_VERSION} \ + && ./configure \ + --enable-shared \ + # This works only on Python 2.7 and throws a warning on every other + # version, but seems otherwise harmless. 
+ --enable-unicode=ucs4 \ + --with-system-ffi \ + --without-ensurepip \ + && make -j$(nproc) \ + && make install \ + && ldconfig \ + ; done \ + && rm -rf "${GNUPGHOME}" \ + && rm -rf /usr/src/python* \ + && rm -rf ~/.cache/ + +RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \ + && python3.7 /tmp/get-pip.py \ + && python3.8 /tmp/get-pip.py \ + && rm /tmp/get-pip.py + +CMD ["python3.7"] diff --git a/.kokoro/docker/docs/fetch_gpg_keys.sh b/.kokoro/docker/docs/fetch_gpg_keys.sh new file mode 100755 index 000000000..d653dd868 --- /dev/null +++ b/.kokoro/docker/docs/fetch_gpg_keys.sh @@ -0,0 +1,45 @@ +#!/bin/bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# A script to fetch gpg keys with retry. +# Avoid jinja parsing the file. +# + +function retry { + if [[ "${#}" -le 1 ]]; then + echo "Usage: ${0} retry_count commands.." + exit 1 + fi + local retries=${1} + local command="${@:2}" + until [[ "${retries}" -le 0 ]]; do + $command && return 0 + if [[ $? -ne 0 ]]; then + echo "command failed, retrying" + ((retries--)) + fi + done + return 1 +} + +# 3.6.9, 3.7.5 (Ned Deily) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + 0D96DF4D4110E5C43FBFB17F2D347EA6AA65421D + +# 3.8.0 (Ɓukasz Langa) +retry 3 gpg --keyserver ha.pool.sks-keyservers.net --recv-keys \ + E3FF2839C048B25C084DEBE9B26995E310250568 + +# diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg index e49c23215..24c8c89dd 100644 --- a/.kokoro/docs/common.cfg +++ b/.kokoro/docs/common.cfg @@ -11,12 +11,12 @@ action { gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" # Use the trampoline script to run in docker. -build_file: "google-auth-library-python/.kokoro/trampoline.sh" +build_file: "google-auth-library-python/.kokoro/trampoline_v2.sh" # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" + value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs" } env_vars: { key: "TRAMPOLINE_BUILD_FILE" @@ -28,6 +28,23 @@ env_vars: { value: "docs-staging" } +env_vars: { + key: "V2_STAGING_BUCKET" + value: "docs-staging-v2" +} + +# It will upload the docker image after successful builds. +env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "true" +} + +# It will always build the docker image. +env_vars: { + key: "TRAMPOLINE_DOCKERFILE" + value: ".kokoro/docker/docs/Dockerfile" +} + # Fetch the token needed for reporting release status to GitHub before_action { fetch_keystore { diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg new file mode 100644 index 000000000..111810782 --- /dev/null +++ b/.kokoro/docs/docs-presubmit.cfg @@ -0,0 +1,17 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "STAGING_BUCKET" + value: "gcloud-python-test" +} + +env_vars: { + key: "V2_STAGING_BUCKET" + value: "gcloud-python-test" +} + +# We only upload the image in the main `docs` build. 
+env_vars: { + key: "TRAMPOLINE_IMAGE_UPLOAD" + value: "false" +} diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh new file mode 100755 index 000000000..f52514257 --- /dev/null +++ b/.kokoro/populate-secrets.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright 2020 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + + +# Populates requested secrets set in SECRET_MANAGER_KEYS from service account: +# kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com +SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + docker run --entrypoint=gcloud \ + --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ + gcr.io/google.com/cloudsdktool/cloud-sdk \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret ${key} > \ + "${SECRET_LOCATION}/${key}" + if [[ $? == 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + fi +done diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 0e5d97867..8acb14e80 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -18,26 +18,16 @@ set -eo pipefail # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 -cd github/google-auth-library-python - -# Remove old nox -python3.6 -m pip uninstall --yes --quiet nox-automation +export PATH="${HOME}/.local/bin:${PATH}" # Install nox -python3.6 -m pip install --upgrade --quiet nox -python3.6 -m nox --version +python3 -m pip install --user --upgrade --quiet nox +python3 -m nox --version # build docs nox -s docs -python3 -m pip install gcp-docuploader - -# install a json parser -sudo apt-get update -sudo apt-get -y install software-properties-common -sudo add-apt-repository universe -sudo apt-get update -sudo apt-get -y install jq +python3 -m pip install --user gcp-docuploader # create metadata python3 -m docuploader create-metadata \ @@ -52,4 +42,23 @@ python3 -m docuploader create-metadata \ cat docs.metadata # upload docs -python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket docs-staging +python3 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}" + + +# docfx yaml files +nox -s docfx + +# create metadata. 
+python3 -m docuploader create-metadata \ + --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \ + --version=$(python3 setup.py --version) \ + --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \ + --distribution-name=$(python3 setup.py --name) \ + --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \ + --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \ + --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json) + +cat docs.metadata + +# upload docs +python3 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}" diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg index b2088d009..b56ca902d 100644 --- a/.kokoro/release/common.cfg +++ b/.kokoro/release/common.cfg @@ -23,42 +23,18 @@ env_vars: { value: "github/google-auth-library-python/.kokoro/release.sh" } -# Fetch the token needed for reporting release status to GitHub -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "yoshi-automation-github-key" - } - } -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google_cloud_pypi_password" - } - } -} - -# Fetch magictoken to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "releasetool-magictoken" - } - } +# Fetch PyPI password +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_pypi_password" + } + } } -# Fetch api key to use with Magic Github Proxy -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "magic-github-proxy-api-key" - } - } -} +# Tokens needed to report release status back to GitHub +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg index 792bc4bbe..4895c2bcf 100644 --- a/.kokoro/samples/python3.6/common.cfg +++ b/.kokoro/samples/python3.6/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.6" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py36" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/google-auth-library-python/.kokoro/test-samples.sh" diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg index 209f6cef9..90aaef1b4 100644 --- a/.kokoro/samples/python3.7/common.cfg +++ b/.kokoro/samples/python3.7/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.7" } +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py37" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/google-auth-library-python/.kokoro/test-samples.sh" diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg index b0095dabd..78fd8c749 100644 --- a/.kokoro/samples/python3.8/common.cfg +++ b/.kokoro/samples/python3.8/common.cfg @@ -13,6 +13,12 @@ env_vars: { value: "py-3.8" } +# Declare build specific Cloud project. 
+env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-py38" +} + env_vars: { key: "TRAMPOLINE_BUILD_FILE" value: "github/google-auth-library-python/.kokoro/test-samples.sh" diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index f4426f67a..9a9de2086 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -28,6 +28,12 @@ if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then git checkout $LATEST_RELEASE fi +# Exit early if samples directory doesn't exist +if [ ! -d "./samples" ]; then + echo "No tests run. `./samples` not found" + exit 0 +fi + # Disable buffering, so that the logs stream through. export PYTHONUNBUFFERED=1 @@ -101,4 +107,4 @@ cd "$ROOT" # Workaround for Kokoro permissions issue: delete secrets rm testing/{test-env.sh,client-secrets.json,service-account.json} -exit "$RTN" \ No newline at end of file +exit "$RTN" diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh index e8c4251f3..f39236e94 100755 --- a/.kokoro/trampoline.sh +++ b/.kokoro/trampoline.sh @@ -15,9 +15,14 @@ set -eo pipefail -python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" || ret_code=$? +# Always run the cleanup script, regardless of the success of bouncing into +# the container. +function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT -chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh -${KOKORO_GFILE_DIR}/trampoline_cleanup.sh || true - -exit ${ret_code} +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" \ No newline at end of file diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh new file mode 100755 index 000000000..719bcd5ba --- /dev/null +++ b/.kokoro/trampoline_v2.sh @@ -0,0 +1,487 @@ +#!/usr/bin/env bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# trampoline_v2.sh +# +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run the Docker with appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# To run this script, first download few files from gcs to /dev/shm. +# (/dev/shm is passed into the container as KOKORO_GFILE_DIR). +# +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/secrets_viewer_service_account.json /dev/shm +# gsutil cp gs://cloud-devrel-kokoro-resources/python-docs-samples/automl_secrets.txt /dev/shm +# +# Then run the script. +# .kokoro/trampoline_v2.sh +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after the +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. 
+# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo specific envvars in .trampolinerc in +# the project root. + + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.5" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. +function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. +function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. 
+ TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. + "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For Build Cop Bot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + "CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. 
+ "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." +for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. "github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}:latest" | grep "${TRAMPOLINE_IMAGE}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. + context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. 
We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. + "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. + "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." + docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. 
+ if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/CHANGELOG.md b/CHANGELOG.md index d05e4e7d5..1e5950234 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,20 @@ [1]: https://pypi.org/project/google-auth/#history +## [1.23.0](https://www.github.com/googleapis/google-auth-library-python/compare/v1.22.1...v1.23.0) (2020-10-29) + + +### Features + +* Add custom scopes for access tokens from the metadata service ([#633](https://www.github.com/googleapis/google-auth-library-python/issues/633)) ([0323cf3](https://www.github.com/googleapis/google-auth-library-python/commit/0323cf390b16e8483660ac88775e8ea4e7f7702d)) + + +### Bug Fixes + +* **deps:** Revert "fix: pin 'aoihttp < 3.7.0dev' ([#634](https://www.github.com/googleapis/google-auth-library-python/issues/634))" ([#632](https://www.github.com/googleapis/google-auth-library-python/issues/632)) ([#640](https://www.github.com/googleapis/google-auth-library-python/issues/640)) ([b790e65](https://www.github.com/googleapis/google-auth-library-python/commit/b790e6535cc37591b23866027a426cde312e07c1)) +* pin 'aoihttp < 3.7.0dev' ([#634](https://www.github.com/googleapis/google-auth-library-python/issues/634)) ([05f9524](https://www.github.com/googleapis/google-auth-library-python/commit/05f95246fab928fe2f445781117eeac8088497fb)) +* remove checks for ancient versions of Cryptography ([#596](https://www.github.com/googleapis/google-auth-library-python/issues/596)) ([6407258](https://www.github.com/googleapis/google-auth-library-python/commit/6407258956ec42e3b722418cb7f366e5ae9272ec)), closes [/github.com/googleapis/google-auth-library-python/issues/595#issuecomment-683903062](https://www.github.com/googleapis//github.com/googleapis/google-auth-library-python/issues/595/issues/issuecomment-683903062) + ### [1.22.1](https://www.github.com/googleapis/google-auth-library-python/compare/v1.22.0...v1.22.1) (2020-10-05) diff --git a/google/auth/_default.py b/google/auth/_default.py index de81c5b2c..43778931a 100644 --- a/google/auth/_default.py +++ b/google/auth/_default.py @@ -274,10 +274,11 @@ def default(scopes=None, request=None, quota_project_id=None): gcloud config set project 3. If the application is running in the `App Engine standard environment`_ - then the credentials and project ID from the `App Identity Service`_ - are used. - 4. If the application is running in `Compute Engine`_ or the - `App Engine flexible environment`_ then the credentials and project ID + (first generation) then the credentials and project ID from the + `App Identity Service`_ are used. + 4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or + the `App Engine flexible environment`_ or the `App Engine standard + environment`_ (second generation) then the credentials and project ID are obtained from the `Metadata Service`_. 5. If no credentials are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised. @@ -293,6 +294,7 @@ def default(scopes=None, request=None, quota_project_id=None): /appengine/flexible .. _Metadata Service: https://cloud.google.com/compute/docs\ /storing-retrieving-metadata + .. _Cloud Run: https://cloud.google.com/run Example:: diff --git a/google/auth/_default_async.py b/google/auth/_default_async.py index 3347fbfdc..1a725afba 100644 --- a/google/auth/_default_async.py +++ b/google/auth/_default_async.py @@ -187,10 +187,11 @@ def default_async(scopes=None, request=None, quota_project_id=None): gcloud config set project 3. 
If the application is running in the `App Engine standard environment`_ - then the credentials and project ID from the `App Identity Service`_ - are used. - 4. If the application is running in `Compute Engine`_ or the - `App Engine flexible environment`_ then the credentials and project ID + (first generation) then the credentials and project ID from the + `App Identity Service`_ are used. + 4. If the application is running in `Compute Engine`_ or `Cloud Run`_ or + the `App Engine flexible environment`_ or the `App Engine standard + environment`_ (second generation) then the credentials and project ID are obtained from the `Metadata Service`_. 5. If no credentials are found, :class:`~google.auth.exceptions.DefaultCredentialsError` will be raised. @@ -206,6 +207,7 @@ def default_async(scopes=None, request=None, quota_project_id=None): /appengine/flexible .. _Metadata Service: https://cloud.google.com/compute/docs\ /storing-retrieving-metadata + .. _Cloud Run: https://cloud.google.com/run Example:: diff --git a/google/auth/compute_engine/_metadata.py b/google/auth/compute_engine/_metadata.py index fe821418e..5687a42f9 100644 --- a/google/auth/compute_engine/_metadata.py +++ b/google/auth/compute_engine/_metadata.py @@ -108,7 +108,9 @@ def ping(request, timeout=_METADATA_DEFAULT_TIMEOUT, retry_count=3): return False -def get(request, path, root=_METADATA_ROOT, recursive=False, retry_count=5): +def get( + request, path, root=_METADATA_ROOT, params=None, recursive=False, retry_count=5 +): """Fetch a resource from the metadata server. Args: @@ -117,6 +119,8 @@ def get(request, path, root=_METADATA_ROOT, recursive=False, retry_count=5): path (str): The resource to retrieve. For example, ``'instance/service-accounts/default'``. root (str): The full path to the metadata server root. + params (Optional[Mapping[str, str]]): A mapping of query parameter + keys to values. recursive (bool): Whether to do a recursive query of metadata. See https://cloud.google.com/compute/docs/metadata#aggcontents for more details. @@ -133,7 +137,7 @@ def get(request, path, root=_METADATA_ROOT, recursive=False, retry_count=5): retrieving metadata. """ base_url = urlparse.urljoin(root, path) - query_params = {} + query_params = {} if params is None else params if recursive: query_params["recursive"] = "true" @@ -224,14 +228,13 @@ def get_service_account_info(request, service_account="default"): google.auth.exceptions.TransportError: if an error occurred while retrieving metadata. """ - return get( - request, - "instance/service-accounts/{0}/".format(service_account), - recursive=True, - ) + path = "instance/service-accounts/{0}/".format(service_account) + # See https://cloud.google.com/compute/docs/metadata#aggcontents + # for more on the use of 'recursive'. + return get(request, path, params={"recursive": "true"}) -def get_service_account_token(request, service_account="default"): +def get_service_account_token(request, service_account="default", scopes=None): """Get the OAuth 2.0 access token for a service account. Args: @@ -240,7 +243,8 @@ def get_service_account_token(request, service_account="default"): service_account (str): The string 'default' or a service account email address. The determines which service account for which to acquire an access token. - + scopes (Optional[Union[str, List[str]]]): Optional string or list of + strings with auth scopes. Returns: Union[str, datetime]: The access token and its expiration. 
@@ -248,9 +252,15 @@ def get_service_account_token(request, service_account="default"): google.auth.exceptions.TransportError: if an error occurred while retrieving metadata. """ - token_json = get( - request, "instance/service-accounts/{0}/token".format(service_account) - ) + if scopes: + if not isinstance(scopes, str): + scopes = ",".join(scopes) + params = {"scopes": scopes} + else: + params = None + + path = "instance/service-accounts/{0}/token".format(service_account) + token_json = get(request, path, params=params) token_expiry = _helpers.utcnow() + datetime.timedelta( seconds=token_json["expires_in"] ) diff --git a/google/auth/compute_engine/credentials.py b/google/auth/compute_engine/credentials.py index b7fca1832..4ac6c8c2c 100644 --- a/google/auth/compute_engine/credentials.py +++ b/google/auth/compute_engine/credentials.py @@ -32,29 +32,28 @@ from google.oauth2 import _client -class Credentials(credentials.ReadOnlyScoped, credentials.CredentialsWithQuotaProject): +class Credentials(credentials.Scoped, credentials.CredentialsWithQuotaProject): """Compute Engine Credentials. These credentials use the Google Compute Engine metadata server to obtain - OAuth 2.0 access tokens associated with the instance's service account. + OAuth 2.0 access tokens associated with the instance's service account, + and are also used for Cloud Run, Flex and App Engine (except for the Python + 2.7 runtime). For more information about Compute Engine authentication, including how to configure scopes, see the `Compute Engine authentication documentation`_. - .. note:: Compute Engine instances can be created with scopes and therefore - these credentials are considered to be 'scoped'. However, you can - not use :meth:`~google.auth.credentials.ScopedCredentials.with_scopes` - because it is not possible to change the scopes that the instance - has. Also note that - :meth:`~google.auth.credentials.ScopedCredentials.has_scopes` will not - work until the credentials have been refreshed. + .. note:: On Compute Engine the metadata server ignores requested scopes. + On Cloud Run, Flex and App Engine the server honours requested scopes. .. _Compute Engine authentication documentation: https://cloud.google.com/compute/docs/authentication#using """ - def __init__(self, service_account_email="default", quota_project_id=None): + def __init__( + self, service_account_email="default", quota_project_id=None, scopes=None + ): """ Args: service_account_email (str): The service account email to use, or @@ -66,6 +65,7 @@ def __init__(self, service_account_email="default", quota_project_id=None): super(Credentials, self).__init__() self._service_account_email = service_account_email self._quota_project_id = quota_project_id + self._scopes = scopes def _retrieve_info(self, request): """Retrieve information about the service account. @@ -81,7 +81,10 @@ def _retrieve_info(self, request): ) self._service_account_email = info["email"] - self._scopes = info["scopes"] + + # Don't override scopes requested by the user. + if self._scopes is None: + self._scopes = info["scopes"] def refresh(self, request): """Refresh the access token and scopes. 
@@ -98,7 +101,9 @@ def refresh(self, request): try: self._retrieve_info(request) self.token, self.expiry = _metadata.get_service_account_token( - request, service_account=self._service_account_email + request, + service_account=self._service_account_email, + scopes=self._scopes, ) except exceptions.TransportError as caught_exc: new_exc = exceptions.RefreshError(caught_exc) @@ -115,14 +120,25 @@ def service_account_email(self): @property def requires_scopes(self): - """False: Compute Engine credentials can not be scoped.""" - return False + return not self._scopes @_helpers.copy_docstring(credentials.CredentialsWithQuotaProject) def with_quota_project(self, quota_project_id): return self.__class__( service_account_email=self._service_account_email, quota_project_id=quota_project_id, + scopes=self._scopes, + ) + + @_helpers.copy_docstring(credentials.Scoped) + def with_scopes(self, scopes): + # Compute Engine credentials can not be scoped (the metadata service + # ignores the scopes parameter). App Engine, Cloud Run and Flex support + # requesting scopes. + return self.__class__( + scopes=scopes, + service_account_email=self._service_account_email, + quota_project_id=self._quota_project_id, ) @@ -323,12 +339,9 @@ def _call_metadata_identity_endpoint(self, request): ValueError: If extracting expiry from the obtained ID token fails. """ try: - id_token = _metadata.get( - request, - "instance/service-accounts/default/identity?audience={}&format=full".format( - self._target_audience - ), - ) + path = "instance/service-accounts/default/identity" + params = {"audience": self._target_audience, "format": "full"} + id_token = _metadata.get(request, path, params=params) except exceptions.TransportError as caught_exc: new_exc = exceptions.RefreshError(caught_exc) six.raise_from(new_exc, caught_exc) diff --git a/google/auth/crypt/_cryptography_rsa.py b/google/auth/crypt/_cryptography_rsa.py index e94bc681e..916c9d80a 100644 --- a/google/auth/crypt/_cryptography_rsa.py +++ b/google/auth/crypt/_cryptography_rsa.py @@ -25,23 +25,10 @@ from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric import padding import cryptography.x509 -import pkg_resources from google.auth import _helpers from google.auth.crypt import base -_IMPORT_ERROR_MSG = ( - "cryptography>=1.4.0 is required to use cryptography-based RSA " "implementation." 
-) - -try: # pragma: NO COVER - release = pkg_resources.get_distribution("cryptography").parsed_version - if release < pkg_resources.parse_version("1.4.0"): - raise ImportError(_IMPORT_ERROR_MSG) -except pkg_resources.DistributionNotFound: # pragma: NO COVER - raise ImportError(_IMPORT_ERROR_MSG) - - _CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----" _BACKEND = backends.default_backend() _PADDING = padding.PKCS1v15() diff --git a/google/auth/crypt/es256.py b/google/auth/crypt/es256.py index 6955efcc5..c6d617606 100644 --- a/google/auth/crypt/es256.py +++ b/google/auth/crypt/es256.py @@ -25,22 +25,10 @@ from cryptography.hazmat.primitives.asymmetric.utils import decode_dss_signature from cryptography.hazmat.primitives.asymmetric.utils import encode_dss_signature import cryptography.x509 -import pkg_resources from google.auth import _helpers from google.auth.crypt import base -_IMPORT_ERROR_MSG = ( - "cryptography>=1.4.0 is required to use cryptography-based ECDSA " "algorithms" -) - -try: # pragma: NO COVER - release = pkg_resources.get_distribution("cryptography").parsed_version - if release < pkg_resources.parse_version("1.4.0"): - raise ImportError(_IMPORT_ERROR_MSG) -except pkg_resources.DistributionNotFound: # pragma: NO COVER - raise ImportError(_IMPORT_ERROR_MSG) - _CERTIFICATE_MARKER = b"-----BEGIN CERTIFICATE-----" _BACKEND = backends.default_backend() diff --git a/google/oauth2/id_token.py b/google/oauth2/id_token.py index bf6bf2c70..d70782b51 100644 --- a/google/oauth2/id_token.py +++ b/google/oauth2/id_token.py @@ -33,9 +33,6 @@ id_info = id_token.verify_oauth2_token( token, request, 'my-client-id.example.com') - if id_info['iss'] != 'https://accounts.google.com': - raise ValueError('Wrong issuer.') - userid = id_info['sub'] By default, this will re-fetch certificates for each verification. 
Because diff --git a/noxfile.py b/noxfile.py index d497f5305..b92f4939d 100644 --- a/noxfile.py +++ b/noxfile.py @@ -30,7 +30,7 @@ "grpcio", ] -ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses"] +ASYNC_DEPENDENCIES = ["pytest-asyncio", "aioresponses", "asynctest"] BLACK_VERSION = "black==19.3b0" BLACK_PATHS = [ @@ -144,6 +144,7 @@ def docs(session): @nox.session(python="pypy") def pypy(session): session.install(*TEST_DEPENDENCIES) + session.install(*ASYNC_DEPENDENCIES) session.install(".") session.run( "pytest", diff --git a/setup.py b/setup.py index 522b98103..d599ecc17 100644 --- a/setup.py +++ b/setup.py @@ -34,7 +34,7 @@ with io.open("README.rst", "r") as fh: long_description = fh.read() -version = "1.22.1" +version = "1.23.0" setup( name="google-auth", diff --git a/synth.metadata b/synth.metadata index 901a2cb9b..5e1ef9a55 100644 --- a/synth.metadata +++ b/synth.metadata @@ -4,15 +4,51 @@ "git": { "name": ".", "remote": "https://github.com/googleapis/google-auth-library-python.git", - "sha": "218a159f646c81021c890b92f9cff003aed949a8" + "sha": "9c4200dff31986b7ff300126e9aa35d14aa84dba" } }, { "git": { "name": "synthtool", "remote": "https://github.com/googleapis/synthtool.git", - "sha": "ffe10407ee2f261c799fb0d01bf32a8abc67ed1e" + "sha": "da5c6050d13b4950c82666a81d8acd25157664ae" } } + ], + "generatedFiles": [ + ".kokoro/build.sh", + ".kokoro/continuous/common.cfg", + ".kokoro/continuous/continuous.cfg", + ".kokoro/docker/docs/Dockerfile", + ".kokoro/docker/docs/fetch_gpg_keys.sh", + ".kokoro/docs/common.cfg", + ".kokoro/docs/docs-presubmit.cfg", + ".kokoro/docs/docs.cfg", + ".kokoro/populate-secrets.sh", + ".kokoro/presubmit/common.cfg", + ".kokoro/presubmit/presubmit.cfg", + ".kokoro/publish-docs.sh", + ".kokoro/release.sh", + ".kokoro/release/common.cfg", + ".kokoro/release/release.cfg", + ".kokoro/samples/lint/common.cfg", + ".kokoro/samples/lint/continuous.cfg", + ".kokoro/samples/lint/periodic.cfg", + ".kokoro/samples/lint/presubmit.cfg", + ".kokoro/samples/python3.6/common.cfg", + ".kokoro/samples/python3.6/continuous.cfg", + ".kokoro/samples/python3.6/periodic.cfg", + ".kokoro/samples/python3.6/presubmit.cfg", + ".kokoro/samples/python3.7/common.cfg", + ".kokoro/samples/python3.7/continuous.cfg", + ".kokoro/samples/python3.7/periodic.cfg", + ".kokoro/samples/python3.7/presubmit.cfg", + ".kokoro/samples/python3.8/common.cfg", + ".kokoro/samples/python3.8/continuous.cfg", + ".kokoro/samples/python3.8/periodic.cfg", + ".kokoro/samples/python3.8/presubmit.cfg", + ".kokoro/test-samples.sh", + ".kokoro/trampoline.sh", + ".kokoro/trampoline_v2.sh" ] } \ No newline at end of file diff --git a/system_tests/noxfile.py b/system_tests/noxfile.py index a039228d9..dcfe8ee81 100644 --- a/system_tests/noxfile.py +++ b/system_tests/noxfile.py @@ -315,7 +315,7 @@ def default_explicit_service_account_async(session): session.env[EXPECT_PROJECT_ENV] = "1" session.install(*(TEST_DEPENDENCIES_SYNC + TEST_DEPENDENCIES_ASYNC)) session.install(LIBRARY_DIR) - session.run("pytest", "system_tests_async/test_default.py", + session.run("pytest", "system_tests_async/test_default.py", "system_tests_async/test_id_token.py") diff --git a/tests/compute_engine/test__metadata.py b/tests/compute_engine/test__metadata.py index d9b039a32..d05337263 100644 --- a/tests/compute_engine/test__metadata.py +++ b/tests/compute_engine/test__metadata.py @@ -155,6 +155,49 @@ def test_get_success_text(): assert result == data +def test_get_success_params(): + data = "foobar" + request = make_request(data, 
headers={"content-type": "text/plain"}) + params = {"recursive": "true"} + + result = _metadata.get(request, PATH, params=params) + + request.assert_called_once_with( + method="GET", + url=_metadata._METADATA_ROOT + PATH + "?recursive=true", + headers=_metadata._METADATA_HEADERS, + ) + assert result == data + + +def test_get_success_recursive_and_params(): + data = "foobar" + request = make_request(data, headers={"content-type": "text/plain"}) + params = {"recursive": "false"} + result = _metadata.get(request, PATH, recursive=True, params=params) + + request.assert_called_once_with( + method="GET", + url=_metadata._METADATA_ROOT + PATH + "?recursive=true", + headers=_metadata._METADATA_HEADERS, + ) + assert result == data + + +def test_get_success_recursive(): + data = "foobar" + request = make_request(data, headers={"content-type": "text/plain"}) + + result = _metadata.get(request, PATH, recursive=True) + + request.assert_called_once_with( + method="GET", + url=_metadata._METADATA_ROOT + PATH + "?recursive=true", + headers=_metadata._METADATA_HEADERS, + ) + assert result == data + + def test_get_success_custom_root_new_variable(): request = make_request("{}", headers={"content-type": "application/json"}) diff --git a/tests/compute_engine/test_credentials.py b/tests/compute_engine/test_credentials.py index 4ee653676..ebe9aa5ba 100644 --- a/tests/compute_engine/test_credentials.py +++ b/tests/compute_engine/test_credentials.py @@ -55,8 +55,8 @@ def test_default_state(self): assert not self.credentials.valid # Expiration hasn't been set yet assert not self.credentials.expired - # Scopes aren't needed - assert not self.credentials.requires_scopes + # Scopes are needed + assert self.credentials.requires_scopes # Service account email hasn't been populated assert self.credentials.service_account_email == "default" # No quota project @@ -96,6 +96,45 @@ def test_refresh_success(self, get, utcnow): # expired) assert self.credentials.valid + @mock.patch( + "google.auth._helpers.utcnow", + return_value=datetime.datetime.min + _helpers.CLOCK_SKEW, + ) + @mock.patch("google.auth.compute_engine._metadata.get", autospec=True) + def test_refresh_success_with_scopes(self, get, utcnow): + get.side_effect = [ + { + # First request is for sevice account info. + "email": "service-account@example.com", + "scopes": ["one", "two"], + }, + { + # Second request is for the token. 
+ "access_token": "token", + "expires_in": 500, + }, + ] + + # Refresh credentials + scopes = ["three", "four"] + self.credentials = self.credentials.with_scopes(scopes) + self.credentials.refresh(None) + + # Check that the credentials have the token and proper expiration + assert self.credentials.token == "token" + assert self.credentials.expiry == (utcnow() + datetime.timedelta(seconds=500)) + + # Check the credential info + assert self.credentials.service_account_email == "service-account@example.com" + assert self.credentials._scopes == scopes + + # Check that the credentials are valid (have a token and are not + # expired) + assert self.credentials.valid + + kwargs = get.call_args[1] + assert kwargs == {"params": {"scopes": "three,four"}} + @mock.patch("google.auth.compute_engine._metadata.get", autospec=True) def test_refresh_error(self, get): get.side_effect = exceptions.TransportError("http error") @@ -138,6 +177,14 @@ def test_with_quota_project(self): assert quota_project_creds._quota_project_id == "project-foo" + def test_with_scopes(self): + assert self.credentials._scopes is None + + scopes = ["one", "two"] + self.credentials = self.credentials.with_scopes(scopes) + + assert self.credentials._scopes == scopes + class TestIDTokenCredentials(object): credentials = None
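
Taken together, the library changes above let callers request explicit access-token scopes from the metadata server. A minimal usage sketch, assuming the code runs somewhere a GCE-style metadata server is reachable (Compute Engine, Cloud Run, or App Engine) and that `google-auth` and its `requests` transport are installed:

```python
# Sketch of the new `scopes` support on Compute Engine credentials
# added in this release. Only runs where a metadata server is reachable.
import google.auth.transport.requests
from google.auth import compute_engine

request = google.auth.transport.requests.Request()

# Credentials now accept scopes directly; requires_scopes is True
# until scopes are set, either here or during refresh().
credentials = compute_engine.Credentials(
    scopes=["https://www.googleapis.com/auth/cloud-platform"]
)
credentials.refresh(request)  # token fetched with scopes passed as a query param
print(credentials.token, credentials.expiry)

# with_scopes() returns a copy with different scopes instead of being
# unavailable, as it was when the class was ReadOnlyScoped.
storage_credentials = credentials.with_scopes(
    ["https://www.googleapis.com/auth/devstorage.read_only"]
)
```

As the updated docstring notes, Cloud Run, Flex, and App Engine honour the requested scopes, while the classic Compute Engine metadata server ignores them.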