diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index 10cf433a8b..c631e1f7d7 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -13,5 +13,5 @@
 # limitations under the License.
 docker:
   image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
-  digest: sha256:8ff1efe878e18bd82a0fb7b70bb86f77e7ab6901fed394440b6135db0ba8d84a
-# created: 2025-01-09T12:01:16.422459506Z
+  digest: sha256:5581906b957284864632cde4e9c51d1cc66b0094990b27e689132fe5cd036046
+# created: 2025-03-05
diff --git a/.kokoro/build.sh b/.kokoro/build.sh
index 7ddfe694b0..6c576c55bf 100755
--- a/.kokoro/build.sh
+++ b/.kokoro/build.sh
@@ -15,11 +15,13 @@
 
 set -eo pipefail
 
+CURRENT_DIR=$(dirname "${BASH_SOURCE[0]}")
+
 if [[ -z "${PROJECT_ROOT:-}" ]]; then
-    PROJECT_ROOT="github/python-spanner"
+    PROJECT_ROOT=$(realpath "${CURRENT_DIR}/..")
 fi
 
-cd "${PROJECT_ROOT}"
+pushd "${PROJECT_ROOT}"
 
 # Disable buffering, so that the logs stream through.
 export PYTHONUNBUFFERED=1
@@ -28,13 +30,19 @@ export PYTHONUNBUFFERED=1
 env | grep KOKORO
 
 # Setup service account credentials.
-export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
+if [[ -f "${KOKORO_GFILE_DIR}/service-account.json" ]]
+then
+    export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json
+fi
 
 # Set up creating a new instance for each system test run
 export GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE=true
 
 # Setup project id.
-export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
+if [[ -f "${KOKORO_GFILE_DIR}/project-id.json" ]]
+then
+    export PROJECT_ID=$(cat "${KOKORO_GFILE_DIR}/project-id.json")
+fi
 
 # If this is a continuous build, send the test log to the FlakyBot.
 # See https://github.com/googleapis/repo-automation-bots/tree/main/packages/flakybot.
@@ -49,7 +57,7 @@ fi
 # If NOX_SESSION is set, it only runs the specified session,
 # otherwise run all the sessions.
 if [[ -n "${NOX_SESSION:-}" ]]; then
-    python3 -m nox -s ${NOX_SESSION:-}
+    python3 -m nox -s ${NOX_SESSION:-}
 else
-    python3 -m nox
+    python3 -m nox
 fi
diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile
deleted file mode 100644
index e5410e296b..0000000000
--- a/.kokoro/docker/docs/Dockerfile
+++ /dev/null
@@ -1,89 +0,0 @@
-# Copyright 2024 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from ubuntu:24.04
-
-ENV DEBIAN_FRONTEND noninteractive
-
-# Ensure local Python is preferred over distribution Python.
-ENV PATH /usr/local/bin:$PATH
-
-# Install dependencies.
-RUN apt-get update \
-  && apt-get install -y --no-install-recommends \
-    apt-transport-https \
-    build-essential \
-    ca-certificates \
-    curl \
-    dirmngr \
-    git \
-    gpg-agent \
-    graphviz \
-    libbz2-dev \
-    libdb5.3-dev \
-    libexpat1-dev \
-    libffi-dev \
-    liblzma-dev \
-    libreadline-dev \
-    libsnappy-dev \
-    libssl-dev \
-    libsqlite3-dev \
-    portaudio19-dev \
-    redis-server \
-    software-properties-common \
-    ssh \
-    sudo \
-    tcl \
-    tcl-dev \
-    tk \
-    tk-dev \
-    uuid-dev \
-    wget \
-    zlib1g-dev \
-  && add-apt-repository universe \
-  && apt-get update \
-  && apt-get -y install jq \
-  && apt-get clean autoclean \
-  && apt-get autoremove -y \
-  && rm -rf /var/lib/apt/lists/* \
-  && rm -f /var/cache/apt/archives/*.deb
-
-
-###################### Install python 3.10.14 for docs/docfx session
-
-# Download python 3.10.14
-RUN wget https://www.python.org/ftp/python/3.10.14/Python-3.10.14.tgz
-
-# Extract files
-RUN tar -xvf Python-3.10.14.tgz
-
-# Install python 3.10.14
-RUN ./Python-3.10.14/configure --enable-optimizations
-RUN make altinstall
-
-ENV PATH /usr/local/bin/python3.10:$PATH
-
-###################### Install pip
-RUN wget -O /tmp/get-pip.py 'https://bootstrap.pypa.io/get-pip.py' \
-  && python3.10 /tmp/get-pip.py \
-  && rm /tmp/get-pip.py
-
-# Test pip
-RUN python3.10 -m pip
-
-# Install build requirements
-COPY requirements.txt /requirements.txt
-RUN python3.10 -m pip install --require-hashes -r requirements.txt
-
-CMD ["python3.10"]
diff --git a/.kokoro/docker/docs/requirements.in b/.kokoro/docker/docs/requirements.in
deleted file mode 100644
index 816817c672..0000000000
--- a/.kokoro/docker/docs/requirements.in
+++ /dev/null
@@ -1 +0,0 @@
-nox
diff --git a/.kokoro/docker/docs/requirements.txt b/.kokoro/docker/docs/requirements.txt
deleted file mode 100644
index f99a5c4aac..0000000000
--- a/.kokoro/docker/docs/requirements.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-#
-# This file is autogenerated by pip-compile with Python 3.10
-# by the following command:
-#
-#    pip-compile --allow-unsafe --generate-hashes synthtool/gcp/templates/python_library/.kokoro/docker/docs/requirements.in
-#
-argcomplete==3.5.2 \
-    --hash=sha256:036d020d79048a5d525bc63880d7a4b8d1668566b8a76daf1144c0bbe0f63472 \
-    --hash=sha256:23146ed7ac4403b70bd6026402468942ceba34a6732255b9edf5b7354f68a6bb
-    # via nox
-colorlog==6.9.0 \
-    --hash=sha256:5906e71acd67cb07a71e779c47c4bcb45fb8c2993eebe9e5adcd6a6f1b283eff \
-    --hash=sha256:bfba54a1b93b94f54e1f4fe48395725a3d92fd2a4af702f6bd70946bdc0c6ac2
-    # via nox
-distlib==0.3.9 \
-    --hash=sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87 \
-    --hash=sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403
-    # via virtualenv
-filelock==3.16.1 \
-    --hash=sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0 \
-    --hash=sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435
-    # via virtualenv
-nox==2024.10.9 \
-    --hash=sha256:1d36f309a0a2a853e9bccb76bbef6bb118ba92fa92674d15604ca99adeb29eab \
-    --hash=sha256:7aa9dc8d1c27e9f45ab046ffd1c3b2c4f7c91755304769df231308849ebded95
-    # via -r synthtool/gcp/templates/python_library/.kokoro/docker/docs/requirements.in
-packaging==24.2 \
-    --hash=sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759 \
-    --hash=sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f
-    # via nox
-platformdirs==4.3.6 \
-    --hash=sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907 \
--hash=sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb - # via virtualenv -tomli==2.2.1 \ - --hash=sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6 \ - --hash=sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd \ - --hash=sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c \ - --hash=sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b \ - --hash=sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8 \ - --hash=sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6 \ - --hash=sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77 \ - --hash=sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff \ - --hash=sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea \ - --hash=sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192 \ - --hash=sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249 \ - --hash=sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee \ - --hash=sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4 \ - --hash=sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98 \ - --hash=sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8 \ - --hash=sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4 \ - --hash=sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281 \ - --hash=sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744 \ - --hash=sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69 \ - --hash=sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13 \ - --hash=sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140 \ - --hash=sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e \ - --hash=sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e \ - --hash=sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc \ - --hash=sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff \ - --hash=sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec \ - --hash=sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2 \ - --hash=sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222 \ - --hash=sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106 \ - --hash=sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272 \ - --hash=sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a \ - --hash=sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7 - # via nox -virtualenv==20.28.0 \ - --hash=sha256:23eae1b4516ecd610481eda647f3a7c09aea295055337331bb4e6892ecce47b0 \ - --hash=sha256:2c9c3262bb8e7b87ea801d715fae4495e6032450c71d2309be9550e7364049aa - # via nox diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg deleted file mode 100644 index fbf5e405bd..0000000000 --- a/.kokoro/docs/common.cfg +++ /dev/null @@ -1,66 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto - -# Build logs will be here -action { - define_artifacts { - regex: "**/*sponge_log.xml" - } -} - -# Download trampoline resources. -gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. 
-build_file: "python-spanner/.kokoro/trampoline_v2.sh"
-
-# Configure the docker image for kokoro-trampoline.
-env_vars: {
-    key: "TRAMPOLINE_IMAGE"
-    value: "gcr.io/cloud-devrel-kokoro-resources/python-lib-docs"
-}
-env_vars: {
-    key: "TRAMPOLINE_BUILD_FILE"
-    value: "github/python-spanner/.kokoro/publish-docs.sh"
-}
-
-env_vars: {
-    key: "STAGING_BUCKET"
-    value: "docs-staging"
-}
-
-env_vars: {
-    key: "V2_STAGING_BUCKET"
-    # Push google cloud library docs to the Cloud RAD bucket `docs-staging-v2`
-    value: "docs-staging-v2"
-}
-
-# It will upload the docker image after successful builds.
-env_vars: {
-    key: "TRAMPOLINE_IMAGE_UPLOAD"
-    value: "true"
-}
-
-# It will always build the docker image.
-env_vars: {
-    key: "TRAMPOLINE_DOCKERFILE"
-    value: ".kokoro/docker/docs/Dockerfile"
-}
-
-# Fetch the token needed for reporting release status to GitHub
-before_action {
-    fetch_keystore {
-        keystore_resource {
-            keystore_config_id: 73713
-            keyname: "yoshi-automation-github-key"
-        }
-    }
-}
-
-before_action {
-    fetch_keystore {
-        keystore_resource {
-            keystore_config_id: 73713
-            keyname: "docuploader_service_account"
-        }
-    }
-}
diff --git a/.kokoro/docs/docs-presubmit.cfg b/.kokoro/docs/docs-presubmit.cfg
deleted file mode 100644
index 505636c275..0000000000
--- a/.kokoro/docs/docs-presubmit.cfg
+++ /dev/null
@@ -1,28 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-env_vars: {
-    key: "STAGING_BUCKET"
-    value: "gcloud-python-test"
-}
-
-env_vars: {
-    key: "V2_STAGING_BUCKET"
-    value: "gcloud-python-test"
-}
-
-# We only upload the image in the main `docs` build.
-env_vars: {
-    key: "TRAMPOLINE_IMAGE_UPLOAD"
-    value: "false"
-}
-
-env_vars: {
-    key: "TRAMPOLINE_BUILD_FILE"
-    value: "github/python-spanner/.kokoro/build.sh"
-}
-
-# Only run this nox session.
-env_vars: {
-    key: "NOX_SESSION"
-    value: "docs docfx"
-}
diff --git a/.kokoro/docs/docs.cfg b/.kokoro/docs/docs.cfg
deleted file mode 100644
index 8f43917d92..0000000000
--- a/.kokoro/docs/docs.cfg
+++ /dev/null
@@ -1 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
\ No newline at end of file
diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh
deleted file mode 100755
index 233205d580..0000000000
--- a/.kokoro/publish-docs.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/bash
-# Copyright 2024 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eo pipefail
-
-# Disable buffering, so that the logs stream through.
-export PYTHONUNBUFFERED=1
-
-export PATH="${HOME}/.local/bin:${PATH}"
-
-# Install nox
-python3.10 -m pip install --require-hashes -r .kokoro/requirements.txt
-python3.10 -m nox --version
-
-# build docs
-nox -s docs
-
-# create metadata
-python3.10 -m docuploader create-metadata \
-  --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
-  --version=$(python3.10 setup.py --version) \
-  --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
-  --distribution-name=$(python3.10 setup.py --name) \
-  --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
-  --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
-  --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
-
-cat docs.metadata
-
-# upload docs
-python3.10 -m docuploader upload docs/_build/html --metadata-file docs.metadata --staging-bucket "${STAGING_BUCKET}"
-
-
-# docfx yaml files
-nox -s docfx
-
-# create metadata.
-python3.10 -m docuploader create-metadata \
-  --name=$(jq --raw-output '.name // empty' .repo-metadata.json) \
-  --version=$(python3.10 setup.py --version) \
-  --language=$(jq --raw-output '.language // empty' .repo-metadata.json) \
-  --distribution-name=$(python3.10 setup.py --name) \
-  --product-page=$(jq --raw-output '.product_documentation // empty' .repo-metadata.json) \
-  --github-repository=$(jq --raw-output '.repo // empty' .repo-metadata.json) \
-  --issue-tracker=$(jq --raw-output '.issue_tracker // empty' .repo-metadata.json)
-
-cat docs.metadata
-
-# upload docs
-python3.10 -m docuploader upload docs/_build/html/docfx_yaml --metadata-file docs.metadata --destination-prefix docfx --staging-bucket "${V2_STAGING_BUCKET}"
diff --git a/.kokoro/release.sh b/.kokoro/release.sh
deleted file mode 100755
index 0b16dec307..0000000000
--- a/.kokoro/release.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-# Copyright 2024 Google LLC
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eo pipefail
-
-# Start the releasetool reporter
-python3 -m pip install --require-hashes -r github/python-spanner/.kokoro/requirements.txt
-python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script
-
-# Disable buffering, so that the logs stream through.
-export PYTHONUNBUFFERED=1
-
-# Move into the package, build the distribution and upload.
-TWINE_PASSWORD=$(cat "${KOKORO_KEYSTORE_DIR}/73713_google-cloud-pypi-token-keystore-2")
-cd github/python-spanner
-python3 setup.py sdist bdist_wheel
-twine upload --username __token__ --password "${TWINE_PASSWORD}" dist/*
diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg
deleted file mode 100644
index 351e701429..0000000000
--- a/.kokoro/release/common.cfg
+++ /dev/null
@@ -1,49 +0,0 @@
-# Format: //devtools/kokoro/config/proto/build.proto
-
-# Build logs will be here
-action {
-    define_artifacts {
-        regex: "**/*sponge_log.xml"
-    }
-}
-
-# Download trampoline resources.
-gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" - -# Use the trampoline script to run in docker. -build_file: "python-spanner/.kokoro/trampoline.sh" - -# Configure the docker image for kokoro-trampoline. -env_vars: { - key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-kokoro-resources/python-multi" -} -env_vars: { - key: "TRAMPOLINE_BUILD_FILE" - value: "github/python-spanner/.kokoro/release.sh" -} - -# Fetch PyPI password -before_action { - fetch_keystore { - keystore_resource { - keystore_config_id: 73713 - keyname: "google-cloud-pypi-token-keystore-2" - } - } -} - -# Tokens needed to report release status back to GitHub -env_vars: { - key: "SECRET_MANAGER_KEYS" - value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" -} - -# Store the packages we uploaded to PyPI. That way, we have a record of exactly -# what we published, which we can use to generate SBOMs and attestations. -action { - define_artifacts { - regex: "github/python-spanner/**/*.tar.gz" - strip_prefix: "github/python-spanner" - } -} diff --git a/.kokoro/release/release.cfg b/.kokoro/release/release.cfg deleted file mode 100644 index 8f43917d92..0000000000 --- a/.kokoro/release/release.cfg +++ /dev/null @@ -1 +0,0 @@ -# Format: //devtools/kokoro/config/proto/build.proto \ No newline at end of file diff --git a/.kokoro/requirements.in b/.kokoro/requirements.in deleted file mode 100644 index fff4d9ce0d..0000000000 --- a/.kokoro/requirements.in +++ /dev/null @@ -1,11 +0,0 @@ -gcp-docuploader -gcp-releasetool>=2 # required for compatibility with cryptography>=42.x -importlib-metadata -typing-extensions -twine -wheel -setuptools -nox>=2022.11.21 # required to remove dependency on py -charset-normalizer<3 -click<8.1.0 -cryptography>=42.0.5 diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt deleted file mode 100644 index 9622baf0ba..0000000000 --- a/.kokoro/requirements.txt +++ /dev/null @@ -1,537 +0,0 @@ -# -# This file is autogenerated by pip-compile with Python 3.9 -# by the following command: -# -# pip-compile --allow-unsafe --generate-hashes requirements.in -# -argcomplete==3.4.0 \ - --hash=sha256:69a79e083a716173e5532e0fa3bef45f793f4e61096cf52b5a42c0211c8b8aa5 \ - --hash=sha256:c2abcdfe1be8ace47ba777d4fce319eb13bf8ad9dace8d085dcad6eded88057f - # via nox -attrs==23.2.0 \ - --hash=sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30 \ - --hash=sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1 - # via gcp-releasetool -backports-tarfile==1.2.0 \ - --hash=sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34 \ - --hash=sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991 - # via jaraco-context -cachetools==5.3.3 \ - --hash=sha256:0abad1021d3f8325b2fc1d2e9c8b9c9d57b04c3932657a72465447332c24d945 \ - --hash=sha256:ba29e2dfa0b8b556606f097407ed1aa62080ee108ab0dc5ec9d6a723a007d105 - # via google-auth -certifi==2024.7.4 \ - --hash=sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b \ - --hash=sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90 - # via requests -cffi==1.16.0 \ - --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ - --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ - --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ - --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ - 
--hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ - --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ - --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ - --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ - --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ - --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ - --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ - --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ - --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ - --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ - --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ - --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ - --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ - --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ - --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ - --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ - --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ - --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ - --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ - --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ - --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ - --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ - --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ - --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ - --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ - --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ - --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ - --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ - --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ - --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ - --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ - --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ - --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ - --hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ - --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ - --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ - --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ - --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ - --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ - --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ - --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ - --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ - 
--hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ - --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ - --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ - --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ - --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ - --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 - # via cryptography -charset-normalizer==2.1.1 \ - --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ - --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f - # via - # -r requirements.in - # requests -click==8.0.4 \ - --hash=sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1 \ - --hash=sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb - # via - # -r requirements.in - # gcp-docuploader - # gcp-releasetool -colorlog==6.8.2 \ - --hash=sha256:3e3e079a41feb5a1b64f978b5ea4f46040a94f11f0e8bbb8261e3dbbeca64d44 \ - --hash=sha256:4dcbb62368e2800cb3c5abd348da7e53f6c362dda502ec27c560b2e58a66bd33 - # via - # gcp-docuploader - # nox -cryptography==42.0.8 \ - --hash=sha256:013629ae70b40af70c9a7a5db40abe5d9054e6f4380e50ce769947b73bf3caad \ - --hash=sha256:2346b911eb349ab547076f47f2e035fc8ff2c02380a7cbbf8d87114fa0f1c583 \ - --hash=sha256:2f66d9cd9147ee495a8374a45ca445819f8929a3efcd2e3df6428e46c3cbb10b \ - --hash=sha256:2f88d197e66c65be5e42cd72e5c18afbfae3f741742070e3019ac8f4ac57262c \ - --hash=sha256:31f721658a29331f895a5a54e7e82075554ccfb8b163a18719d342f5ffe5ecb1 \ - --hash=sha256:343728aac38decfdeecf55ecab3264b015be68fc2816ca800db649607aeee648 \ - --hash=sha256:5226d5d21ab681f432a9c1cf8b658c0cb02533eece706b155e5fbd8a0cdd3949 \ - --hash=sha256:57080dee41209e556a9a4ce60d229244f7a66ef52750f813bfbe18959770cfba \ - --hash=sha256:5a94eccb2a81a309806027e1670a358b99b8fe8bfe9f8d329f27d72c094dde8c \ - --hash=sha256:6b7c4f03ce01afd3b76cf69a5455caa9cfa3de8c8f493e0d3ab7d20611c8dae9 \ - --hash=sha256:7016f837e15b0a1c119d27ecd89b3515f01f90a8615ed5e9427e30d9cdbfed3d \ - --hash=sha256:81884c4d096c272f00aeb1f11cf62ccd39763581645b0812e99a91505fa48e0c \ - --hash=sha256:81d8a521705787afe7a18d5bfb47ea9d9cc068206270aad0b96a725022e18d2e \ - --hash=sha256:8d09d05439ce7baa8e9e95b07ec5b6c886f548deb7e0f69ef25f64b3bce842f2 \ - --hash=sha256:961e61cefdcb06e0c6d7e3a1b22ebe8b996eb2bf50614e89384be54c48c6b63d \ - --hash=sha256:9c0c1716c8447ee7dbf08d6db2e5c41c688544c61074b54fc4564196f55c25a7 \ - --hash=sha256:a0608251135d0e03111152e41f0cc2392d1e74e35703960d4190b2e0f4ca9c70 \ - --hash=sha256:a0c5b2b0585b6af82d7e385f55a8bc568abff8923af147ee3c07bd8b42cda8b2 \ - --hash=sha256:ad803773e9df0b92e0a817d22fd8a3675493f690b96130a5e24f1b8fabbea9c7 \ - --hash=sha256:b297f90c5723d04bcc8265fc2a0f86d4ea2e0f7ab4b6994459548d3a6b992a14 \ - --hash=sha256:ba4f0a211697362e89ad822e667d8d340b4d8d55fae72cdd619389fb5912eefe \ - --hash=sha256:c4783183f7cb757b73b2ae9aed6599b96338eb957233c58ca8f49a49cc32fd5e \ - --hash=sha256:c9bb2ae11bfbab395bdd072985abde58ea9860ed84e59dbc0463a5d0159f5b71 \ - --hash=sha256:cafb92b2bc622cd1aa6a1dce4b93307792633f4c5fe1f46c6b97cf67073ec961 \ - --hash=sha256:d45b940883a03e19e944456a558b67a41160e367a719833c53de6911cabba2b7 \ - --hash=sha256:dc0fdf6787f37b1c6b08e6dfc892d9d068b5bdb671198c72072828b80bd5fe4c \ - --hash=sha256:dea567d1b0e8bc5764b9443858b673b734100c2871dc93163f58c46a97a83d28 \ - 
--hash=sha256:dec9b018df185f08483f294cae6ccac29e7a6e0678996587363dc352dc65c842 \ - --hash=sha256:e3ec3672626e1b9e55afd0df6d774ff0e953452886e06e0f1eb7eb0c832e8902 \ - --hash=sha256:e599b53fd95357d92304510fb7bda8523ed1f79ca98dce2f43c115950aa78801 \ - --hash=sha256:fa76fbb7596cc5839320000cdd5d0955313696d9511debab7ee7278fc8b5c84a \ - --hash=sha256:fff12c88a672ab9c9c1cf7b0c80e3ad9e2ebd9d828d955c126be4fd3e5578c9e - # via - # -r requirements.in - # gcp-releasetool - # secretstorage -distlib==0.3.8 \ - --hash=sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784 \ - --hash=sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64 - # via virtualenv -docutils==0.21.2 \ - --hash=sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f \ - --hash=sha256:dafca5b9e384f0e419294eb4d2ff9fa826435bf15f15b7bd45723e8ad76811b2 - # via readme-renderer -filelock==3.15.4 \ - --hash=sha256:2207938cbc1844345cb01a5a95524dae30f0ce089eba5b00378295a17e3e90cb \ - --hash=sha256:6ca1fffae96225dab4c6eaf1c4f4f28cd2568d3ec2a44e15a08520504de468e7 - # via virtualenv -gcp-docuploader==0.6.5 \ - --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ - --hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea - # via -r requirements.in -gcp-releasetool==2.0.1 \ - --hash=sha256:34314a910c08e8911d9c965bd44f8f2185c4f556e737d719c33a41f6a610de96 \ - --hash=sha256:b0d5863c6a070702b10883d37c4bdfd74bf930fe417f36c0c965d3b7c779ae62 - # via -r requirements.in -google-api-core==2.19.1 \ - --hash=sha256:f12a9b8309b5e21d92483bbd47ce2c445861ec7d269ef6784ecc0ea8c1fa6125 \ - --hash=sha256:f4695f1e3650b316a795108a76a1c416e6afb036199d1c1f1f110916df479ffd - # via - # google-cloud-core - # google-cloud-storage -google-auth==2.31.0 \ - --hash=sha256:042c4702efa9f7d3c48d3a69341c209381b125faa6dbf3ebe56bc7e40ae05c23 \ - --hash=sha256:87805c36970047247c8afe614d4e3af8eceafc1ebba0c679fe75ddd1d575e871 - # via - # gcp-releasetool - # google-api-core - # google-cloud-core - # google-cloud-storage -google-cloud-core==2.4.1 \ - --hash=sha256:9b7749272a812bde58fff28868d0c5e2f585b82f37e09a1f6ed2d4d10f134073 \ - --hash=sha256:a9e6a4422b9ac5c29f79a0ede9485473338e2ce78d91f2370c01e730eab22e61 - # via google-cloud-storage -google-cloud-storage==2.17.0 \ - --hash=sha256:49378abff54ef656b52dca5ef0f2eba9aa83dc2b2c72c78714b03a1a95fe9388 \ - --hash=sha256:5b393bc766b7a3bc6f5407b9e665b2450d36282614b7945e570b3480a456d1e1 - # via gcp-docuploader -google-crc32c==1.5.0 \ - --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ - --hash=sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876 \ - --hash=sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c \ - --hash=sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289 \ - --hash=sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298 \ - --hash=sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02 \ - --hash=sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f \ - --hash=sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2 \ - --hash=sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a \ - --hash=sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb \ - --hash=sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210 \ - --hash=sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5 \ - 
--hash=sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee \ - --hash=sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c \ - --hash=sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a \ - --hash=sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314 \ - --hash=sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd \ - --hash=sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65 \ - --hash=sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37 \ - --hash=sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4 \ - --hash=sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13 \ - --hash=sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894 \ - --hash=sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31 \ - --hash=sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e \ - --hash=sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709 \ - --hash=sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740 \ - --hash=sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc \ - --hash=sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d \ - --hash=sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c \ - --hash=sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c \ - --hash=sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d \ - --hash=sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906 \ - --hash=sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61 \ - --hash=sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57 \ - --hash=sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c \ - --hash=sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a \ - --hash=sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438 \ - --hash=sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946 \ - --hash=sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7 \ - --hash=sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96 \ - --hash=sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091 \ - --hash=sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae \ - --hash=sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d \ - --hash=sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88 \ - --hash=sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2 \ - --hash=sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd \ - --hash=sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541 \ - --hash=sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728 \ - --hash=sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178 \ - --hash=sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968 \ - --hash=sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346 \ - --hash=sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8 \ - --hash=sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93 \ - --hash=sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7 \ - 
--hash=sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273 \ - --hash=sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462 \ - --hash=sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94 \ - --hash=sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd \ - --hash=sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e \ - --hash=sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57 \ - --hash=sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b \ - --hash=sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9 \ - --hash=sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a \ - --hash=sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100 \ - --hash=sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325 \ - --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ - --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ - --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 - # via - # google-cloud-storage - # google-resumable-media -google-resumable-media==2.7.1 \ - --hash=sha256:103ebc4ba331ab1bfdac0250f8033627a2cd7cde09e7ccff9181e31ba4315b2c \ - --hash=sha256:eae451a7b2e2cdbaaa0fd2eb00cc8a1ee5e95e16b55597359cbc3d27d7d90e33 - # via google-cloud-storage -googleapis-common-protos==1.63.2 \ - --hash=sha256:27a2499c7e8aff199665b22741997e485eccc8645aa9176c7c988e6fae507945 \ - --hash=sha256:27c5abdffc4911f28101e635de1533fb4cfd2c37fbaa9174587c799fac90aa87 - # via google-api-core -idna==3.7 \ - --hash=sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc \ - --hash=sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0 - # via requests -importlib-metadata==8.0.0 \ - --hash=sha256:15584cf2b1bf449d98ff8a6ff1abef57bf20f3ac6454f431736cd3e660921b2f \ - --hash=sha256:188bd24e4c346d3f0a933f275c2fec67050326a856b9a359881d7c2a697e8812 - # via - # -r requirements.in - # keyring - # twine -jaraco-classes==3.4.0 \ - --hash=sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd \ - --hash=sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790 - # via keyring -jaraco-context==5.3.0 \ - --hash=sha256:3e16388f7da43d384a1a7cd3452e72e14732ac9fe459678773a3608a812bf266 \ - --hash=sha256:c2f67165ce1f9be20f32f650f25d8edfc1646a8aeee48ae06fb35f90763576d2 - # via keyring -jaraco-functools==4.0.1 \ - --hash=sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664 \ - --hash=sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8 - # via keyring -jeepney==0.8.0 \ - --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ - --hash=sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755 - # via - # keyring - # secretstorage -jinja2==3.1.4 \ - --hash=sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369 \ - --hash=sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d - # via gcp-releasetool -keyring==25.2.1 \ - --hash=sha256:2458681cdefc0dbc0b7eb6cf75d0b98e59f9ad9b2d4edd319d18f68bdca95e50 \ - --hash=sha256:daaffd42dbda25ddafb1ad5fec4024e5bbcfe424597ca1ca452b299861e49f1b - # via - # gcp-releasetool - # twine -markdown-it-py==3.0.0 \ - --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ - 
--hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb - # via rich -markupsafe==2.1.5 \ - --hash=sha256:00e046b6dd71aa03a41079792f8473dc494d564611a8f89bbbd7cb93295ebdcf \ - --hash=sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff \ - --hash=sha256:0e397ac966fdf721b2c528cf028494e86172b4feba51d65f81ffd65c63798f3f \ - --hash=sha256:17b950fccb810b3293638215058e432159d2b71005c74371d784862b7e4683f3 \ - --hash=sha256:1f3fbcb7ef1f16e48246f704ab79d79da8a46891e2da03f8783a5b6fa41a9532 \ - --hash=sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f \ - --hash=sha256:2b7c57a4dfc4f16f7142221afe5ba4e093e09e728ca65c51f5620c9aaeb9a617 \ - --hash=sha256:2d2d793e36e230fd32babe143b04cec8a8b3eb8a3122d2aceb4a371e6b09b8df \ - --hash=sha256:30b600cf0a7ac9234b2638fbc0fb6158ba5bdcdf46aeb631ead21248b9affbc4 \ - --hash=sha256:397081c1a0bfb5124355710fe79478cdbeb39626492b15d399526ae53422b906 \ - --hash=sha256:3a57fdd7ce31c7ff06cdfbf31dafa96cc533c21e443d57f5b1ecc6cdc668ec7f \ - --hash=sha256:3c6b973f22eb18a789b1460b4b91bf04ae3f0c4234a0a6aa6b0a92f6f7b951d4 \ - --hash=sha256:3e53af139f8579a6d5f7b76549125f0d94d7e630761a2111bc431fd820e163b8 \ - --hash=sha256:4096e9de5c6fdf43fb4f04c26fb114f61ef0bf2e5604b6ee3019d51b69e8c371 \ - --hash=sha256:4275d846e41ecefa46e2015117a9f491e57a71ddd59bbead77e904dc02b1bed2 \ - --hash=sha256:4c31f53cdae6ecfa91a77820e8b151dba54ab528ba65dfd235c80b086d68a465 \ - --hash=sha256:4f11aa001c540f62c6166c7726f71f7573b52c68c31f014c25cc7901deea0b52 \ - --hash=sha256:5049256f536511ee3f7e1b3f87d1d1209d327e818e6ae1365e8653d7e3abb6a6 \ - --hash=sha256:58c98fee265677f63a4385256a6d7683ab1832f3ddd1e66fe948d5880c21a169 \ - --hash=sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad \ - --hash=sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2 \ - --hash=sha256:5dedb4db619ba5a2787a94d877bc8ffc0566f92a01c0ef214865e54ecc9ee5e0 \ - --hash=sha256:619bc166c4f2de5caa5a633b8b7326fbe98e0ccbfacabd87268a2b15ff73a029 \ - --hash=sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f \ - --hash=sha256:656f7526c69fac7f600bd1f400991cc282b417d17539a1b228617081106feb4a \ - --hash=sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced \ - --hash=sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5 \ - --hash=sha256:7502934a33b54030eaf1194c21c692a534196063db72176b0c4028e140f8f32c \ - --hash=sha256:7a68b554d356a91cce1236aa7682dc01df0edba8d043fd1ce607c49dd3c1edcf \ - --hash=sha256:7b2e5a267c855eea6b4283940daa6e88a285f5f2a67f2220203786dfa59b37e9 \ - --hash=sha256:823b65d8706e32ad2df51ed89496147a42a2a6e01c13cfb6ffb8b1e92bc910bb \ - --hash=sha256:8590b4ae07a35970728874632fed7bd57b26b0102df2d2b233b6d9d82f6c62ad \ - --hash=sha256:8dd717634f5a044f860435c1d8c16a270ddf0ef8588d4887037c5028b859b0c3 \ - --hash=sha256:8dec4936e9c3100156f8a2dc89c4b88d5c435175ff03413b443469c7c8c5f4d1 \ - --hash=sha256:97cafb1f3cbcd3fd2b6fbfb99ae11cdb14deea0736fc2b0952ee177f2b813a46 \ - --hash=sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc \ - --hash=sha256:a549b9c31bec33820e885335b451286e2969a2d9e24879f83fe904a5ce59d70a \ - --hash=sha256:ac07bad82163452a6884fe8fa0963fb98c2346ba78d779ec06bd7a6262132aee \ - --hash=sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900 \ - --hash=sha256:b91c037585eba9095565a3556f611e3cbfaa42ca1e865f7b8015fe5c7336d5a5 \ - --hash=sha256:bc1667f8b83f48511b94671e0e441401371dfd0f0a795c7daa4a3cd1dde55bea \ - 
--hash=sha256:bec0a414d016ac1a18862a519e54b2fd0fc8bbfd6890376898a6c0891dd82e9f \ - --hash=sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5 \ - --hash=sha256:bff1b4290a66b490a2f4719358c0cdcd9bafb6b8f061e45c7a2460866bf50c2e \ - --hash=sha256:c061bb86a71b42465156a3ee7bd58c8c2ceacdbeb95d05a99893e08b8467359a \ - --hash=sha256:c8b29db45f8fe46ad280a7294f5c3ec36dbac9491f2d1c17345be8e69cc5928f \ - --hash=sha256:ce409136744f6521e39fd8e2a24c53fa18ad67aa5bc7c2cf83645cce5b5c4e50 \ - --hash=sha256:d050b3361367a06d752db6ead6e7edeb0009be66bc3bae0ee9d97fb326badc2a \ - --hash=sha256:d283d37a890ba4c1ae73ffadf8046435c76e7bc2247bbb63c00bd1a709c6544b \ - --hash=sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4 \ - --hash=sha256:daa4ee5a243f0f20d528d939d06670a298dd39b1ad5f8a72a4275124a7819eff \ - --hash=sha256:db0b55e0f3cc0be60c1f19efdde9a637c32740486004f20d1cff53c3c0ece4d2 \ - --hash=sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46 \ - --hash=sha256:ea3d8a3d18833cf4304cd2fc9cbb1efe188ca9b5efef2bdac7adc20594a0e46b \ - --hash=sha256:ec6a563cff360b50eed26f13adc43e61bc0c04d94b8be985e6fb24b81f6dcfdf \ - --hash=sha256:f5dfb42c4604dddc8e4305050aa6deb084540643ed5804d7455b5df8fe16f5e5 \ - --hash=sha256:fa173ec60341d6bb97a89f5ea19c85c5643c1e7dedebc22f5181eb73573142c5 \ - --hash=sha256:fa9db3f79de01457b03d4f01b34cf91bc0048eb2c3846ff26f66687c2f6d16ab \ - --hash=sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd \ - --hash=sha256:ffee1f21e5ef0d712f9033568f8344d5da8cc2869dbd08d87c84656e6a2d2f68 - # via jinja2 -mdurl==0.1.2 \ - --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ - --hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba - # via markdown-it-py -more-itertools==10.3.0 \ - --hash=sha256:e5d93ef411224fbcef366a6e8ddc4c5781bc6359d43412a65dd5964e46111463 \ - --hash=sha256:ea6a02e24a9161e51faad17a8782b92a0df82c12c1c8886fec7f0c3fa1a1b320 - # via - # jaraco-classes - # jaraco-functools -nh3==0.2.18 \ - --hash=sha256:0411beb0589eacb6734f28d5497ca2ed379eafab8ad8c84b31bb5c34072b7164 \ - --hash=sha256:14c5a72e9fe82aea5fe3072116ad4661af5cf8e8ff8fc5ad3450f123e4925e86 \ - --hash=sha256:19aaba96e0f795bd0a6c56291495ff59364f4300d4a39b29a0abc9cb3774a84b \ - --hash=sha256:34c03fa78e328c691f982b7c03d4423bdfd7da69cd707fe572f544cf74ac23ad \ - --hash=sha256:36c95d4b70530b320b365659bb5034341316e6a9b30f0b25fa9c9eff4c27a204 \ - --hash=sha256:3a157ab149e591bb638a55c8c6bcb8cdb559c8b12c13a8affaba6cedfe51713a \ - --hash=sha256:42c64511469005058cd17cc1537578eac40ae9f7200bedcfd1fc1a05f4f8c200 \ - --hash=sha256:5f36b271dae35c465ef5e9090e1fdaba4a60a56f0bb0ba03e0932a66f28b9189 \ - --hash=sha256:6955369e4d9f48f41e3f238a9e60f9410645db7e07435e62c6a9ea6135a4907f \ - --hash=sha256:7b7c2a3c9eb1a827d42539aa64091640bd275b81e097cd1d8d82ef91ffa2e811 \ - --hash=sha256:8ce0f819d2f1933953fca255db2471ad58184a60508f03e6285e5114b6254844 \ - --hash=sha256:94a166927e53972a9698af9542ace4e38b9de50c34352b962f4d9a7d4c927af4 \ - --hash=sha256:a7f1b5b2c15866f2db413a3649a8fe4fd7b428ae58be2c0f6bca5eefd53ca2be \ - --hash=sha256:c8b3a1cebcba9b3669ed1a84cc65bf005728d2f0bc1ed2a6594a992e817f3a50 \ - --hash=sha256:de3ceed6e661954871d6cd78b410213bdcb136f79aafe22aa7182e028b8c7307 \ - --hash=sha256:f0eca9ca8628dbb4e916ae2491d72957fdd35f7a5d326b7032a345f111ac07fe - # via readme-renderer -nox==2024.4.15 \ - --hash=sha256:6492236efa15a460ecb98e7b67562a28b70da006ab0be164e8821177577c0565 \ - 
--hash=sha256:ecf6700199cdfa9e5ea0a41ff5e6ef4641d09508eda6edb89d9987864115817f - # via -r requirements.in -packaging==24.1 \ - --hash=sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002 \ - --hash=sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124 - # via - # gcp-releasetool - # nox -pkginfo==1.10.0 \ - --hash=sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297 \ - --hash=sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097 - # via twine -platformdirs==4.2.2 \ - --hash=sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee \ - --hash=sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3 - # via virtualenv -proto-plus==1.24.0 \ - --hash=sha256:30b72a5ecafe4406b0d339db35b56c4059064e69227b8c3bda7462397f966445 \ - --hash=sha256:402576830425e5f6ce4c2a6702400ac79897dab0b4343821aa5188b0fab81a12 - # via google-api-core -protobuf==5.27.2 \ - --hash=sha256:0e341109c609749d501986b835f667c6e1e24531096cff9d34ae411595e26505 \ - --hash=sha256:176c12b1f1c880bf7a76d9f7c75822b6a2bc3db2d28baa4d300e8ce4cde7409b \ - --hash=sha256:354d84fac2b0d76062e9b3221f4abbbacdfd2a4d8af36bab0474f3a0bb30ab38 \ - --hash=sha256:4fadd8d83e1992eed0248bc50a4a6361dc31bcccc84388c54c86e530b7f58863 \ - --hash=sha256:54330f07e4949d09614707c48b06d1a22f8ffb5763c159efd5c0928326a91470 \ - --hash=sha256:610e700f02469c4a997e58e328cac6f305f649826853813177e6290416e846c6 \ - --hash=sha256:7fc3add9e6003e026da5fc9e59b131b8f22b428b991ccd53e2af8071687b4fce \ - --hash=sha256:9e8f199bf7f97bd7ecebffcae45ebf9527603549b2b562df0fbc6d4d688f14ca \ - --hash=sha256:a109916aaac42bff84702fb5187f3edadbc7c97fc2c99c5ff81dd15dcce0d1e5 \ - --hash=sha256:b848dbe1d57ed7c191dfc4ea64b8b004a3f9ece4bf4d0d80a367b76df20bf36e \ - --hash=sha256:f3ecdef226b9af856075f28227ff2c90ce3a594d092c39bee5513573f25e2714 - # via - # gcp-docuploader - # gcp-releasetool - # google-api-core - # googleapis-common-protos - # proto-plus -pyasn1==0.6.0 \ - --hash=sha256:3a35ab2c4b5ef98e17dfdec8ab074046fbda76e281c5a706ccd82328cfc8f64c \ - --hash=sha256:cca4bb0f2df5504f02f6f8a775b6e416ff9b0b3b16f7ee80b5a3153d9b804473 - # via - # pyasn1-modules - # rsa -pyasn1-modules==0.4.0 \ - --hash=sha256:831dbcea1b177b28c9baddf4c6d1013c24c3accd14a1873fffaa6a2e905f17b6 \ - --hash=sha256:be04f15b66c206eed667e0bb5ab27e2b1855ea54a842e5037738099e8ca4ae0b - # via google-auth -pycparser==2.22 \ - --hash=sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6 \ - --hash=sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc - # via cffi -pygments==2.18.0 \ - --hash=sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199 \ - --hash=sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a - # via - # readme-renderer - # rich -pyjwt==2.8.0 \ - --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ - --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 - # via gcp-releasetool -pyperclip==1.9.0 \ - --hash=sha256:b7de0142ddc81bfc5c7507eea19da920b92252b548b96186caf94a5e2527d310 - # via gcp-releasetool -python-dateutil==2.9.0.post0 \ - --hash=sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3 \ - --hash=sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427 - # via gcp-releasetool -readme-renderer==44.0 \ - --hash=sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151 \ - 
--hash=sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1 - # via twine -requests==2.32.3 \ - --hash=sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760 \ - --hash=sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6 - # via - # gcp-releasetool - # google-api-core - # google-cloud-storage - # requests-toolbelt - # twine -requests-toolbelt==1.0.0 \ - --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \ - --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06 - # via twine -rfc3986==2.0.0 \ - --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ - --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c - # via twine -rich==13.7.1 \ - --hash=sha256:4edbae314f59eb482f54e9e30bf00d33350aaa94f4bfcd4e9e3110e64d0d7222 \ - --hash=sha256:9be308cb1fe2f1f57d67ce99e95af38a1e2bc71ad9813b0e247cf7ffbcc3a432 - # via twine -rsa==4.9 \ - --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ - --hash=sha256:e38464a49c6c85d7f1351b0126661487a7e0a14a50f1675ec50eb34d4f20ef21 - # via google-auth -secretstorage==3.3.3 \ - --hash=sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77 \ - --hash=sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99 - # via keyring -six==1.16.0 \ - --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ - --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 - # via - # gcp-docuploader - # python-dateutil -tomli==2.0.1 \ - --hash=sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc \ - --hash=sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f - # via nox -twine==5.1.1 \ - --hash=sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997 \ - --hash=sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db - # via -r requirements.in -typing-extensions==4.12.2 \ - --hash=sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d \ - --hash=sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8 - # via -r requirements.in -urllib3==2.2.2 \ - --hash=sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472 \ - --hash=sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168 - # via - # requests - # twine -virtualenv==20.26.3 \ - --hash=sha256:4c43a2a236279d9ea36a0d76f98d84bd6ca94ac4e0f4a3b9d46d05e10fea542a \ - --hash=sha256:8cc4a31139e796e9a7de2cd5cf2489de1217193116a8fd42328f1bd65f434589 - # via nox -wheel==0.43.0 \ - --hash=sha256:465ef92c69fa5c5da2d1cf8ac40559a8c940886afcef87dcf14b9470862f1d85 \ - --hash=sha256:55c570405f142630c6b9f72fe09d9b67cf1477fcf543ae5b8dcb1f5b7377da81 - # via -r requirements.in -zipp==3.19.2 \ - --hash=sha256:bf1dcf6450f873a13e952a29504887c89e6de7506209e5b1bcc3460135d4de19 \ - --hash=sha256:f091755f667055f2d02b32c53771a7a6c8b47e1fdbc4b72a8b9072b3eef8015c - # via importlib-metadata - -# The following packages are considered to be unsafe in a requirements file: -setuptools==70.2.0 \ - --hash=sha256:b8b8060bb426838fbe942479c90296ce976249451118ef566a5a0b7d8b78fb05 \ - --hash=sha256:bd63e505105011b25c3c11f753f7e3b8465ea739efddaccef8f0efac2137bac1 - # via -r requirements.in diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 8be9b88803..00d392a248 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": 
"3.52.0" + ".": "3.53.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index aef63c02e1..0bde684970 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,82 @@ [1]: https://pypi.org/project/google-cloud-spanner/#history +## [3.53.0](https://github.com/googleapis/python-spanner/compare/v3.52.0...v3.53.0) (2025-03-12) + + +### Features + +* Add AddSplitPoints API ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Add Attempt, Operation and GFE Metrics ([#1302](https://github.com/googleapis/python-spanner/issues/1302)) ([fb21d9a](https://github.com/googleapis/python-spanner/commit/fb21d9acf2545cf7b8e9e21b65eabf21a7bf895f)) +* Add REST Interceptors which support reading metadata ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Add support for opt-in debug logging ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Add support for reading selective GAPIC generation methods from service YAML ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Add the last statement option to ExecuteSqlRequest and ExecuteBatchDmlRequest ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Add UUID in Spanner TypeCode enum ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* End to end tracing ([#1315](https://github.com/googleapis/python-spanner/issues/1315)) ([aa5d0e6](https://github.com/googleapis/python-spanner/commit/aa5d0e6c1d3e5b0e4b0578e80c21e7c523c30fb5)) +* Exposing FreeInstanceAvailability in InstanceConfig ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Exposing FreeInstanceMetadata in Instance configuration (to define the metadata related to FREE instance type) ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Exposing InstanceType in Instance configuration (to define PROVISIONED or FREE spanner instance) ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Exposing QuorumType in InstanceConfig ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Exposing storage_limit_per_processing_unit in InstanceConfig ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Snapshot isolation ([#1318](https://github.com/googleapis/python-spanner/issues/1318)) ([992fcae](https://github.com/googleapis/python-spanner/commit/992fcae2d4fd2b47380d159a3416b8d6d6e1c937)) +* **spanner:** A new enum `IsolationLevel` is added ([#1224](https://github.com/googleapis/python-spanner/issues/1224)) ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) + + +### Bug Fixes + +* Allow Protobuf 6.x ([#1320](https://github.com/googleapis/python-spanner/issues/1320)) ([1faab91](https://github.com/googleapis/python-spanner/commit/1faab91790ae3e2179fbab11b69bb02254ab048a)) +* Cleanup after metric integration test ([#1322](https://github.com/googleapis/python-spanner/issues/1322)) ([d7cf8b9](https://github.com/googleapis/python-spanner/commit/d7cf8b968dfc2b98d3b1d7ae8a025da55bec0767)) +* **deps:** Require grpc-google-iam-v1>=0.14.0 
([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* Fix typing issue with gRPC metadata when key ends in -bin ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) + + +### Performance Improvements + +* Add option for last_statement ([#1313](https://github.com/googleapis/python-spanner/issues/1313)) ([19ab6ef](https://github.com/googleapis/python-spanner/commit/19ab6ef0d58262ebb19183e700db6cf124f9b3c5)) + + +### Documentation + +* A comment for enum `DefaultBackupScheduleType` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for enum value `AUTOMATIC` in enum `DefaultBackupScheduleType` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for enum value `GOOGLE_MANAGED` in enum `Type` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for enum value `NONE` in enum `DefaultBackupScheduleType` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for enum value `USER_MANAGED` in enum `Type` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `base_config` in message `.google.spanner.admin.instance.v1.InstanceConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `default_backup_schedule_type` in message `.google.spanner.admin.instance.v1.Instance` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `filter` in message `.google.spanner.admin.instance.v1.ListInstanceConfigOperationsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `filter` in message `.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `instance_config` in message `.google.spanner.admin.instance.v1.CreateInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `instance_partition_deadline` in message `.google.spanner.admin.instance.v1.ListInstancePartitionOperationsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `location` in message `.google.spanner.admin.instance.v1.ReplicaInfo` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `node_count` in message `.google.spanner.admin.instance.v1.Instance` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `node_count` in message `.google.spanner.admin.instance.v1.InstancePartition` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `operations` in message 
`.google.spanner.admin.instance.v1.ListInstanceConfigOperationsResponse` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `operations` in message `.google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `optional_replicas` in message `.google.spanner.admin.instance.v1.InstanceConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `parent` in message `.google.spanner.admin.instance.v1.ListInstancePartitionsRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `processing_units` in message `.google.spanner.admin.instance.v1.Instance` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `processing_units` in message `.google.spanner.admin.instance.v1.InstancePartition` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `referencing_backups` in message `.google.spanner.admin.instance.v1.InstancePartition` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `replicas` in message `.google.spanner.admin.instance.v1.InstanceConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `storage_utilization_percent` in message `.google.spanner.admin.instance.v1.AutoscalingConfig` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for field `unreachable` in message `.google.spanner.admin.instance.v1.ListInstancePartitionsResponse` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for message `CreateInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for message `DeleteInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for message `UpdateInstanceConfigRequest` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for method `CreateInstance` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for method `CreateInstanceConfig` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for method `CreateInstancePartition` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for method `ListInstanceConfigOperations` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94)) +* A comment for method `ListInstanceConfigs` 
in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `ListInstancePartitionOperations` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `MoveInstance` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `UpdateInstance` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `UpdateInstanceConfig` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* A comment for method `UpdateInstancePartition` in service `InstanceAdmin` is changed ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+* Fix typo timzeone -> timezone ([7a5afba](https://github.com/googleapis/python-spanner/commit/7a5afba28b20ac94f3eec799f4b572c95af60b94))
+
 ## [3.52.0](https://github.com/googleapis/python-spanner/compare/v3.51.0...v3.52.0) (2025-02-19)
diff --git a/docs/opentelemetry-tracing.rst b/docs/opentelemetry-tracing.rst
index c715ad58ad..c581d2cb87 100644
--- a/docs/opentelemetry-tracing.rst
+++ b/docs/opentelemetry-tracing.rst
@@ -38,6 +38,10 @@ We also need to tell OpenTelemetry which exporter to use. To export Spanner trac
         # can modify it though using the environment variable
         # SPANNER_ENABLE_EXTENDED_TRACING=false.
         enable_extended_tracing=False,
+
+        # By default, end-to-end tracing is disabled. Set to True
+        # to also get spans from the Spanner server.
+        enable_end_to_end_tracing=True,
     )
 
     spanner = spanner.NewClient(project_id, observability_options=observability_options)
@@ -71,3 +75,22 @@ leak. Sadly due to legacy behavior, we cannot simply turn off this behavior by d
 SPANNER_ENABLE_EXTENDED_TRACING=false to turn it off globally or when creating each
 SpannerClient, please set `observability_options.enable_extended_tracing=false`
+
+End-to-end tracing
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In addition to client-side tracing, you can opt in to end-to-end tracing. End-to-end tracing helps you understand and debug latency issues that are specific to Spanner. See the `tracing overview <https://cloud.google.com/spanner/docs/tracing-overview>`_ for more information.
+
+To configure end-to-end tracing:
+
+1. Opt in to end-to-end tracing. You can opt in by either:
+* Setting the environment variable `SPANNER_ENABLE_END_TO_END_TRACING=true` before your application starts, or
+* Setting `observability_options.enable_end_to_end_tracing=true` in code when creating each SpannerClient.
+
+2. Set the trace context propagation in OpenTelemetry:
+
+.. code:: python
+
+    from opentelemetry.propagate import set_global_textmap
+    from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+    set_global_textmap(TraceContextTextMapPropagator())
\ No newline at end of file
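For reference, the two opt-in steps documented above combine into a few lines of setup. This is a minimal sketch rather than part of the shipped docs: the project ID is a placeholder, and it assumes the OpenTelemetry SDK packages used elsewhere in this change are installed.

.. code:: python

    from opentelemetry.propagate import set_global_textmap
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
    import google.cloud.spanner as spanner

    # Step 2: propagate W3C trace context so Spanner server spans
    # join the same trace as the client spans.
    set_global_textmap(TraceContextTextMapPropagator())

    # Step 1: opt in to end-to-end tracing when creating the client
    # (equivalent to exporting SPANNER_ENABLE_END_TO_END_TRACING=true).
    spanner_client = spanner.Client(
        "my-project",  # placeholder project ID
        observability_options=dict(
            tracer_provider=TracerProvider(),
            enable_end_to_end_tracing=True,
        ),
    )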
diff --git a/examples/trace.py b/examples/trace.py
index e7659e13e2..5b826ca5ad 100644
--- a/examples/trace.py
+++ b/examples/trace.py
@@ -18,16 +18,19 @@
 import google.cloud.spanner as spanner
 from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
 from opentelemetry.sdk.trace import TracerProvider
 from opentelemetry.sdk.trace.export import BatchSpanProcessor
 from opentelemetry.sdk.trace.sampling import ALWAYS_ON
 from opentelemetry import trace
+from opentelemetry.propagate import set_global_textmap
+from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
+
+# Setup common variables that'll be used between Spanner and traces.
+project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
 
-def main():
-    # Setup common variables that'll be used between Spanner and traces.
-    project_id = os.environ.get('SPANNER_PROJECT_ID', 'test-project')
-
+def spanner_with_cloud_trace():
+    # [START spanner_opentelemetry_traces_cloudtrace_usage]
     # Setup OpenTelemetry, trace and Cloud Trace exporter.
     tracer_provider = TracerProvider(sampler=ALWAYS_ON)
     trace_exporter = CloudTraceSpanExporter(project_id=project_id)
@@ -36,10 +39,42 @@ def main():
     # Setup the Cloud Spanner Client.
     spanner_client = spanner.Client(
         project_id,
-        observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True),
+        observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
+    )
+
+    # [END spanner_opentelemetry_traces_cloudtrace_usage]
+    return spanner_client
+
+def spanner_with_otlp():
+    # [START spanner_opentelemetry_traces_otlp_usage]
+    # Setup OpenTelemetry, trace and OTLP exporter.
+    tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+    otlp_exporter = OTLPSpanExporter(endpoint="http://localhost:4317")
+    tracer_provider.add_span_processor(BatchSpanProcessor(otlp_exporter))
+
+    # Setup the Cloud Spanner Client.
+    spanner_client = spanner.Client(
+        project_id,
+        observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True, enable_end_to_end_tracing=True),
     )
 
+    # [END spanner_opentelemetry_traces_otlp_usage]
+    return spanner_client
+
+
+def main():
+    # Setup OpenTelemetry, trace and Cloud Trace exporter.
+    tracer_provider = TracerProvider(sampler=ALWAYS_ON)
+    trace_exporter = CloudTraceSpanExporter(project_id=project_id)
+    tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
+
+    # Setup the Cloud Spanner Client.
+    # Change to "spanner_client = spanner_with_otlp()" to use the OTLP exporter.
+    spanner_client = spanner_with_cloud_trace()
     instance = spanner_client.instance('test-instance')
     database = instance.database('test-db')
+
+    # Set W3C Trace Context as the global propagator for end-to-end tracing.
+    set_global_textmap(TraceContextTextMapPropagator())
 
     # Retrieve a tracer from our custom tracer provider.
     tracer = tracer_provider.get_tracer('MyApp')
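The refactored example stops short of issuing a query. A natural continuation of main(), sketched here rather than taken from the change itself (the span name is invented), is to wrap a read in an application span so that application, client, and server spans all land in the same trace:

.. code:: python

    # Hypothetical continuation of main() from examples/trace.py.
    with tracer.start_as_current_span('query_database'):
        with database.snapshot() as snapshot:
            # Any query works here; a trivial one keeps the sketch self-contained.
            rows = snapshot.execute_sql('SELECT 1')
            for row in rows:
                print(row)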
diff --git a/google/cloud/spanner_admin_database_v1/__init__.py b/google/cloud/spanner_admin_database_v1/__init__.py
index d81a0e2dcc..3d6ac19f3c 100644
--- a/google/cloud/spanner_admin_database_v1/__init__.py
+++ b/google/cloud/spanner_admin_database_v1/__init__.py
@@ -23,6 +23,7 @@
 from .types.backup import Backup
 from .types.backup import BackupInfo
+from .types.backup import BackupInstancePartition
 from .types.backup import CopyBackupEncryptionConfig
 from .types.backup import CopyBackupMetadata
 from .types.backup import CopyBackupRequest
@@ -51,6 +52,8 @@
 from .types.common import EncryptionInfo
 from .types.common import OperationProgress
 from .types.common import DatabaseDialect
+from .types.spanner_database_admin import AddSplitPointsRequest
+from .types.spanner_database_admin import AddSplitPointsResponse
 from .types.spanner_database_admin import CreateDatabaseMetadata
 from .types.spanner_database_admin import CreateDatabaseRequest
 from .types.spanner_database_admin import Database
@@ -71,6 +74,7 @@
 from .types.spanner_database_admin import RestoreDatabaseMetadata
 from .types.spanner_database_admin import RestoreDatabaseRequest
 from .types.spanner_database_admin import RestoreInfo
+from .types.spanner_database_admin import SplitPoints
 from .types.spanner_database_admin import UpdateDatabaseDdlMetadata
 from .types.spanner_database_admin import UpdateDatabaseDdlRequest
 from .types.spanner_database_admin import UpdateDatabaseMetadata
@@ -79,8 +83,11 @@
 __all__ = (
     "DatabaseAdminAsyncClient",
+    "AddSplitPointsRequest",
+    "AddSplitPointsResponse",
     "Backup",
     "BackupInfo",
+    "BackupInstancePartition",
     "BackupSchedule",
     "BackupScheduleSpec",
     "CopyBackupEncryptionConfig",
@@ -129,6 +136,7 @@
     "RestoreDatabaseRequest",
     "RestoreInfo",
     "RestoreSourceType",
+    "SplitPoints",
     "UpdateBackupRequest",
     "UpdateBackupScheduleRequest",
     "UpdateDatabaseDdlMetadata",
diff --git a/google/cloud/spanner_admin_database_v1/gapic_metadata.json b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
index e6096e59a2..e5e704ff96 100644
--- a/google/cloud/spanner_admin_database_v1/gapic_metadata.json
+++ b/google/cloud/spanner_admin_database_v1/gapic_metadata.json
@@ -10,6 +10,11 @@
       "grpc": {
         "libraryClient": "DatabaseAdminClient",
         "rpcs": {
+          "AddSplitPoints": {
+            "methods": [
+              "add_split_points"
+            ]
+          },
           "CopyBackup": {
             "methods": [
               "copy_backup"
@@ -140,6 +145,11 @@
       "grpc-async": {
         "libraryClient": "DatabaseAdminAsyncClient",
         "rpcs": {
+          "AddSplitPoints": {
+            "methods": [
+              "add_split_points"
+            ]
+          },
           "CopyBackup": {
             "methods": [
               "copy_backup"
@@ -270,6 +280,11 @@
       "rest": {
         "libraryClient": "DatabaseAdminClient",
         "rpcs": {
+          "AddSplitPoints": {
+            "methods": [
+              "add_split_points"
+            ]
+          },
           "CopyBackup": {
             "methods": [
               "copy_backup"
diff --git a/google/cloud/spanner_admin_database_v1/gapic_version.py b/google/cloud/spanner_admin_database_v1/gapic_version.py
index 5ea820ffea..9b205942db 100644
--- a/google/cloud/spanner_admin_database_v1/gapic_version.py
+++ b/google/cloud/spanner_admin_database_v1/gapic_version.py
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
# -__version__ = "3.52.0" # {x-release-please-version} +__version__ = "3.53.0" # {x-release-please-version} diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py index 649da0cbe8..584cd6711e 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -66,6 +67,15 @@ from .transports.grpc_asyncio import DatabaseAdminGrpcAsyncIOTransport from .client import DatabaseAdminClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class DatabaseAdminAsyncClient: """Cloud Spanner Database Admin API @@ -107,6 +117,10 @@ class DatabaseAdminAsyncClient: ) instance_path = staticmethod(DatabaseAdminClient.instance_path) parse_instance_path = staticmethod(DatabaseAdminClient.parse_instance_path) + instance_partition_path = staticmethod(DatabaseAdminClient.instance_partition_path) + parse_instance_partition_path = staticmethod( + DatabaseAdminClient.parse_instance_partition_path + ) common_billing_account_path = staticmethod( DatabaseAdminClient.common_billing_account_path ) @@ -297,6 +311,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.spanner.admin.database_v1.DatabaseAdminAsyncClient`.", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "credentialsType": None, + }, + ) + async def list_databases( self, request: Optional[ @@ -306,7 +342,7 @@ async def list_databases( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatabasesAsyncPager: r"""Lists Cloud Spanner databases. @@ -352,8 +388,10 @@ async def sample_list_databases(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesAsyncPager: @@ -431,7 +469,7 @@ async def create_database( create_statement: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new Cloud Spanner database and starts to prepare it for serving. The returned [long-running @@ -502,8 +540,10 @@ async def sample_create_database(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -579,7 +619,7 @@ async def get_database( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.Database: r"""Gets the state of a Cloud Spanner database. @@ -624,8 +664,10 @@ async def sample_get_database(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.Database: @@ -687,7 +729,7 @@ async def update_database( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates a Cloud Spanner database. The returned [long-running operation][google.longrunning.Operation] can be used to track @@ -783,8 +825,10 @@ async def sample_update_database(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: @@ -863,7 +907,7 @@ async def update_database_ddl( statements: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The @@ -942,8 +986,10 @@ async def sample_update_database_ddl(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1026,7 +1072,7 @@ async def drop_database( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be retained according to their @@ -1068,8 +1114,10 @@ async def sample_drop_database(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1123,7 +1171,7 @@ async def get_database_ddl( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.GetDatabaseDdlResponse: r"""Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This method does not show pending @@ -1171,8 +1219,10 @@ async def sample_get_database_ddl(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse: @@ -1233,7 +1283,7 @@ async def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a database or backup resource. Replaces any existing policy. @@ -1287,8 +1337,10 @@ async def sample_set_iam_policy(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -1374,7 +1426,7 @@ async def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists @@ -1429,8 +1481,10 @@ async def sample_get_iam_policy(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -1517,7 +1571,7 @@ async def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified database or backup resource. @@ -1582,8 +1636,10 @@ async def sample_test_iam_permissions(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -1643,7 +1699,7 @@ async def create_backup( backup_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Starts creating a new Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have @@ -1723,8 +1779,10 @@ async def sample_create_backup(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1803,7 +1861,7 @@ async def copy_backup( expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Starts copying a Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have @@ -1898,8 +1956,10 @@ async def sample_copy_backup(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1977,7 +2037,7 @@ async def get_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup.Backup: r"""Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. @@ -2022,8 +2082,10 @@ async def sample_get_backup(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_database_v1.types.Backup: @@ -2083,7 +2145,7 @@ async def update_backup( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup.Backup: r"""Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. @@ -2143,8 +2205,10 @@ async def sample_update_backup(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.Backup: @@ -2207,7 +2271,7 @@ async def delete_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. @@ -2250,8 +2314,10 @@ async def sample_delete_backup(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2303,7 +2369,7 @@ async def list_backups( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupsAsyncPager: r"""Lists completed and pending backups. Backups returned are ordered by ``create_time`` in descending order, starting from @@ -2350,8 +2416,10 @@ async def sample_list_backups(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsAsyncPager: @@ -2430,7 +2498,7 @@ async def restore_database( backup: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Create a new database by restoring from a completed backup. The new database must be in the same project and in an instance with @@ -2519,8 +2587,10 @@ async def sample_restore_database(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2598,7 +2668,7 @@ async def list_database_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatabaseOperationsAsyncPager: r"""Lists database [longrunning-operations][google.longrunning.Operation]. A @@ -2653,8 +2723,10 @@ async def sample_list_database_operations(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsAsyncPager: @@ -2731,7 +2803,7 @@ async def list_backup_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupOperationsAsyncPager: r"""Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. @@ -2788,8 +2860,10 @@ async def sample_list_backup_operations(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
 Returns:
             google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager:
@@ -2866,7 +2940,7 @@ async def list_database_roles(
         parent: Optional[str] = None,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
         timeout: Union[float, object] = gapic_v1.method.DEFAULT,
-        metadata: Sequence[Tuple[str, str]] = (),
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
     ) -> pagers.ListDatabaseRolesAsyncPager:
         r"""Lists Cloud Spanner database roles.
@@ -2912,8 +2986,10 @@ async def sample_list_database_roles():
             retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                 should be retried.
             timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
 
         Returns:
             google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesAsyncPager:
@@ -2981,6 +3057,128 @@ async def sample_list_database_roles():
         # Done; return the response.
         return response
 
+    async def add_split_points(
+        self,
+        request: Optional[
+            Union[spanner_database_admin.AddSplitPointsRequest, dict]
+        ] = None,
+        *,
+        database: Optional[str] = None,
+        split_points: Optional[
+            MutableSequence[spanner_database_admin.SplitPoints]
+        ] = None,
+        retry: OptionalRetry = gapic_v1.method.DEFAULT,
+        timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
+    ) -> spanner_database_admin.AddSplitPointsResponse:
+        r"""Adds split points to specified tables and indexes of a
+        database.
+
+        .. code-block:: python
+
+            # This snippet has been automatically generated and should be regarded as a
+            # code template only.
+            # It will require modifications to work:
+            # - It may require correct/in-range values for request initialization.
+            # - It may require specifying regional endpoints when creating the service
+            #   client as shown in:
+            #   https://googleapis.dev/python/google-api-core/latest/client_options.html
+            from google.cloud import spanner_admin_database_v1
+
+            async def sample_add_split_points():
+                # Create a client
+                client = spanner_admin_database_v1.DatabaseAdminAsyncClient()
+
+                # Initialize request argument(s)
+                request = spanner_admin_database_v1.AddSplitPointsRequest(
+                    database="database_value",
+                )
+
+                # Make the request
+                response = await client.add_split_points(request=request)
+
+                # Handle the response
+                print(response)
+
+        Args:
+            request (Optional[Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]]):
+                The request object. The request for
+                [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints].
+            database (:class:`str`):
+                Required. The database on whose tables/indexes split
+                points are to be added. Values are of the form
+                ``projects/<project>/instances/<instance>/databases/<database>``.
+
+                This corresponds to the ``database`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            split_points (:class:`MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]`):
+                Required. The split points to add.
+                This corresponds to the ``split_points`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+ retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse: + The response for + [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database, split_points]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one. + if not isinstance(request, spanner_database_admin.AddSplitPointsRequest): + request = spanner_database_admin.AddSplitPointsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if database is not None: + request.database = database + if split_points: + request.split_points.extend(split_points) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._client._transport._wrapped_methods[ + self._client._transport.add_split_points + ] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Validate the universe domain. + self._client._validate_universe_domain() + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def create_backup_schedule( self, request: Optional[ @@ -2992,7 +3190,7 @@ async def create_backup_schedule( backup_schedule_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup_schedule.BackupSchedule: r"""Creates a new backup schedule. @@ -3053,8 +3251,10 @@ async def sample_create_backup_schedule(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_database_v1.types.BackupSchedule: @@ -3120,7 +3320,7 @@ async def get_backup_schedule( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup_schedule.BackupSchedule: r"""Gets backup schedule for the input schedule name. @@ -3165,8 +3365,10 @@ async def sample_get_backup_schedule(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.BackupSchedule: @@ -3231,7 +3433,7 @@ async def update_backup_schedule( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup_schedule.BackupSchedule: r"""Updates a backup schedule. @@ -3289,8 +3491,10 @@ async def sample_update_backup_schedule(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.BackupSchedule: @@ -3358,7 +3562,7 @@ async def delete_backup_schedule( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a backup schedule. @@ -3400,8 +3604,10 @@ async def sample_delete_backup_schedule(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have @@ -3455,7 +3661,7 @@ async def list_backup_schedules( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupSchedulesAsyncPager: r"""Lists all the backup schedules for the database. @@ -3502,8 +3708,10 @@ async def sample_list_backup_schedules(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesAsyncPager: @@ -3577,7 +3785,7 @@ async def list_operations( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: r"""Lists operations that match the specified filter in the request. @@ -3588,8 +3796,10 @@ async def list_operations( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.ListOperationsResponse: Response message for ``ListOperations`` method. @@ -3630,7 +3840,7 @@ async def get_operation( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Gets the latest state of a long-running operation. @@ -3641,8 +3851,10 @@ async def get_operation( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: An ``Operation`` object. @@ -3683,7 +3895,7 @@ async def delete_operation( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a long-running operation. 
@@ -3699,8 +3911,10 @@ async def delete_operation(
             retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                 should be retried.
             timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
         Returns:
             None
         """
@@ -3737,7 +3951,7 @@ async def cancel_operation(
         *,
         retry: OptionalRetry = gapic_v1.method.DEFAULT,
         timeout: Union[float, object] = gapic_v1.method.DEFAULT,
-        metadata: Sequence[Tuple[str, str]] = (),
+        metadata: Sequence[Tuple[str, Union[str, bytes]]] = (),
     ) -> None:
         r"""Starts asynchronous cancellation on a long-running
         operation.
@@ -3752,8 +3966,10 @@ async def cancel_operation(
             retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                 should be retried.
             timeout (float): The timeout for this request.
-            metadata (Sequence[Tuple[str, str]]): Strings which should be
-                sent along with the request as metadata.
+            metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be
+                sent along with the request as metadata. Normally, each value must be of type `str`,
+                but for metadata keys ending with the suffix `-bin`, the corresponding values must
+                be of type `bytes`.
         Returns:
             None
         """
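With the async client's new surface complete, a rough usage sketch of AddSplitPoints follows. It is not taken from this change: the database path, table name, and split key are invented, the SplitPoints/Key field shapes reflect the generated types as I understand them and should be checked against them, and the metadata pair merely illustrates the new `-bin`/bytes typing described in the docstrings above.

.. code:: python

    from google.cloud import spanner_admin_database_v1
    from google.protobuf import struct_pb2

    client = spanner_admin_database_v1.DatabaseAdminClient()

    # One split point on a hypothetical "Singers" table, splitting at key "42".
    split = spanner_admin_database_v1.SplitPoints(
        table="Singers",
        keys=[
            spanner_admin_database_v1.SplitPoints.Key(
                key_parts=struct_pb2.ListValue(
                    values=[struct_pb2.Value(string_value="42")]
                )
            )
        ],
    )

    response = client.add_split_points(
        database="projects/my-project/instances/my-instance/databases/my-db",
        split_points=[split],
        # Metadata values for keys ending in -bin may now be bytes;
        # this illustrative key/value pair is not required by the API.
        metadata=[("x-example-bin", b"\x00\x01")],
    )
    print(response)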
diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
index 4fb132b1cb..1eced63261 100644
--- a/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
+++ b/google/cloud/spanner_admin_database_v1/services/database_admin/client.py
@@ -14,6 +14,9 @@
 # limitations under the License.
 #
 from collections import OrderedDict
+from http import HTTPStatus
+import json
+import logging as std_logging
 import os
 import re
 from typing import (
@@ -48,6 +51,15 @@
 except AttributeError:  # pragma: NO COVER
     OptionalRetry = Union[retries.Retry, object, None]  # type: ignore
 
+try:
+    from google.api_core import client_logging  # type: ignore
+
+    CLIENT_LOGGING_SUPPORTED = True  # pragma: NO COVER
+except ImportError:  # pragma: NO COVER
+    CLIENT_LOGGING_SUPPORTED = False
+
+_LOGGER = std_logging.getLogger(__name__)
+
 from google.api_core import operation  # type: ignore
 from google.api_core import operation_async  # type: ignore
 from google.cloud.spanner_admin_database_v1.services.database_admin import pagers
@@ -364,6 +376,28 @@
     def parse_instance_path(path: str) -> Dict[str, str]:
         m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
         return m.groupdict() if m else {}
 
+    @staticmethod
+    def instance_partition_path(
+        project: str,
+        instance: str,
+        instance_partition: str,
+    ) -> str:
+        """Returns a fully-qualified instance_partition string."""
+        return "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}".format(
+            project=project,
+            instance=instance,
+            instance_partition=instance_partition,
+        )
+
+    @staticmethod
+    def parse_instance_partition_path(path: str) -> Dict[str, str]:
+        """Parses an instance_partition path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/instancePartitions/(?P<instance_partition>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
     @staticmethod
     def common_billing_account_path(
         billing_account: str,
@@ -620,52 +654,45 @@
         raise ValueError("Universe Domain cannot be an empty string.")
         return universe_domain
 
-    @staticmethod
-    def _compare_universes(
-        client_universe: str, credentials: ga_credentials.Credentials
-    ) -> bool:
-        """Returns True iff the universe domains used by the client and credentials match.
-
-        Args:
-            client_universe (str): The universe domain configured via the client options.
-            credentials (ga_credentials.Credentials): The credentials being used in the client.
+    def _validate_universe_domain(self):
+        """Validates client's and credentials' universe domains are consistent.
 
         Returns:
-            bool: True iff client_universe matches the universe in credentials.
+            bool: True iff the configured universe domain is valid.
 
         Raises:
-            ValueError: when client_universe does not match the universe in credentials.
+            ValueError: If the configured universe domain is not valid.
         """
-        default_universe = DatabaseAdminClient._DEFAULT_UNIVERSE
-        credentials_universe = getattr(credentials, "universe_domain", default_universe)
-
-        if client_universe != credentials_universe:
-            raise ValueError(
-                "The configured universe domain "
-                f"({client_universe}) does not match the universe domain "
-                f"found in the credentials ({credentials_universe}). "
-                "If you haven't configured the universe domain explicitly, "
-                f"`{default_universe}` is the default."
-            )
+        # NOTE (b/349488459): universe validation is disabled until further notice.
         return True
 
-    def _validate_universe_domain(self):
-        """Validates client's and credentials' universe domains are consistent.
-
-        Returns:
-            bool: True iff the configured universe domain is valid.
+    def _add_cred_info_for_auth_errors(
+        self, error: core_exceptions.GoogleAPICallError
+    ) -> None:
+        """Adds credential info string to error details for 401/403/404 errors.
 
-        Raises:
-            ValueError: If the configured universe domain is not valid.
+ Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. """ - self._is_universe_domain_valid = ( - self._is_universe_domain_valid - or DatabaseAdminClient._compare_universes( - self.universe_domain, self.transport._credentials - ) - ) - return self._is_universe_domain_valid + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) @property def api_endpoint(self): @@ -771,6 +798,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -836,6 +867,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.spanner.admin.database_v1.DatabaseAdminClient`.", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "credentialsType": None, + }, + ) + def list_databases( self, request: Optional[ @@ -845,7 +899,7 @@ def list_databases( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatabasesPager: r"""Lists Cloud Spanner databases. @@ -891,8 +945,10 @@ def sample_list_databases(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesPager: @@ -967,7 +1023,7 @@ def create_database( create_statement: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new Cloud Spanner database and starts to prepare it for serving. 
The returned [long-running @@ -1038,8 +1094,10 @@ def sample_create_database(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1112,7 +1170,7 @@ def get_database( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.Database: r"""Gets the state of a Cloud Spanner database. @@ -1157,8 +1215,10 @@ def sample_get_database(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.Database: @@ -1217,7 +1277,7 @@ def update_database( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates a Cloud Spanner database. The returned [long-running operation][google.longrunning.Operation] can be used to track @@ -1313,8 +1373,10 @@ def sample_update_database(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1390,7 +1452,7 @@ def update_database_ddl( statements: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates the schema of a Cloud Spanner database by creating/altering/dropping tables, columns, indexes, etc. The @@ -1469,8 +1531,10 @@ def sample_update_database_ddl(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1550,7 +1614,7 @@ def drop_database( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Drops (aka deletes) a Cloud Spanner database. Completed backups for the database will be retained according to their @@ -1592,8 +1656,10 @@ def sample_drop_database(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1644,7 +1710,7 @@ def get_database_ddl( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.GetDatabaseDdlResponse: r"""Returns the schema of a Cloud Spanner database as a list of formatted DDL statements. This method does not show pending @@ -1692,8 +1758,10 @@ def sample_get_database_ddl(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse: @@ -1751,7 +1819,7 @@ def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a database or backup resource. Replaces any existing policy. @@ -1805,8 +1873,10 @@ def sample_set_iam_policy(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
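The signature change repeated throughout these hunks widens each method's `metadata` parameter from `Sequence[Tuple[str, str]]` to `Sequence[Tuple[str, Union[str, bytes]]]`, matching gRPC's convention that metadata keys ending in `-bin` carry binary values while all other keys carry text. A minimal sketch of what the widened annotation permits; the resource name and header keys below are hypothetical, not taken from this diff:

```python
from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()

# Iterate databases while attaching custom call metadata: one text header
# plus one binary header, whose key must end in `-bin` per the gRPC rules.
for database in client.list_databases(
    parent="projects/my-project/instances/my-instance",  # hypothetical names
    metadata=[
        ("x-custom-label", "text-value"),     # str value for a normal key
        ("x-custom-trace-bin", b"\x0a\x0b"),  # bytes value for a `-bin` key
    ],
):
    print(database.name)
```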
Returns: google.iam.v1.policy_pb2.Policy: @@ -1893,7 +1963,7 @@ def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a database or backup resource. Returns an empty policy if a database or backup exists @@ -1948,8 +2018,10 @@ def sample_get_iam_policy(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2037,7 +2109,7 @@ def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified database or backup resource. @@ -2102,8 +2174,10 @@ def sample_test_iam_permissions(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -2164,7 +2238,7 @@ def create_backup( backup_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Starts creating a new Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have @@ -2244,8 +2318,10 @@ def sample_create_backup(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation.Operation: @@ -2321,7 +2397,7 @@ def copy_backup( expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Starts copying a Cloud Spanner Backup. The returned backup [long-running operation][google.longrunning.Operation] will have @@ -2416,8 +2492,10 @@ def sample_copy_backup(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2492,7 +2570,7 @@ def get_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup.Backup: r"""Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. @@ -2537,8 +2615,10 @@ def sample_get_backup(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.Backup: @@ -2595,7 +2675,7 @@ def update_backup( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup.Backup: r"""Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. @@ -2655,8 +2735,10 @@ def sample_update_backup(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_database_v1.types.Backup: @@ -2716,7 +2798,7 @@ def delete_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. @@ -2759,8 +2841,10 @@ def sample_delete_backup(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2809,7 +2893,7 @@ def list_backups( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupsPager: r"""Lists completed and pending backups. Backups returned are ordered by ``create_time`` in descending order, starting from @@ -2856,8 +2940,10 @@ def sample_list_backups(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsPager: @@ -2933,7 +3019,7 @@ def restore_database( backup: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Create a new database by restoring from a completed backup. The new database must be in the same project and in an instance with @@ -3022,8 +3108,10 @@ def sample_restore_database(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation.Operation: @@ -3098,7 +3186,7 @@ def list_database_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatabaseOperationsPager: r"""Lists database [longrunning-operations][google.longrunning.Operation]. A @@ -3153,8 +3241,10 @@ def sample_list_database_operations(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsPager: @@ -3228,7 +3318,7 @@ def list_backup_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupOperationsPager: r"""Lists the backup [long-running operations][google.longrunning.Operation] in the given instance. @@ -3285,8 +3375,10 @@ def sample_list_backup_operations(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsPager: @@ -3360,7 +3452,7 @@ def list_database_roles( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListDatabaseRolesPager: r"""Lists Cloud Spanner database roles. @@ -3406,8 +3498,10 @@ def sample_list_database_roles(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesPager: @@ -3472,6 +3566,125 @@ def sample_list_database_roles(): # Done; return the response. 
return response + def add_split_points( + self, + request: Optional[ + Union[spanner_database_admin.AddSplitPointsRequest, dict] + ] = None, + *, + database: Optional[str] = None, + split_points: Optional[ + MutableSequence[spanner_database_admin.SplitPoints] + ] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> spanner_database_admin.AddSplitPointsResponse: + r"""Adds split points to specified tables, indexes of a + database. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import spanner_admin_database_v1 + + def sample_add_split_points(): + # Create a client + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Initialize request argument(s) + request = spanner_admin_database_v1.AddSplitPointsRequest( + database="database_value", + ) + + # Make the request + response = client.add_split_points(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest, dict]): + The request object. The request for + [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. + database (str): + Required. The database on whose tables/indexes split + points are to be added. Values are of the form + ``projects/<project>/instances/<instance>/databases/<database>``. + + This corresponds to the ``database`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + split_points (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]): + Required. The split points to add. + This corresponds to the ``split_points`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse: + The response for + [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. + + """ + # Create or coerce a protobuf request object. + # - Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([database, split_points]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # - Use the request object if provided (there's no risk of modifying the input as + # there are no flattened fields), or create one.
+ if not isinstance(request, spanner_database_admin.AddSplitPointsRequest): + request = spanner_database_admin.AddSplitPointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if database is not None: + request.database = database + if split_points is not None: + request.split_points = split_points + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_split_points] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("database", request.database),)), + ) + + # Validate the universe domain. + self._validate_universe_domain() + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + def create_backup_schedule( self, request: Optional[ @@ -3483,7 +3696,7 @@ def create_backup_schedule( backup_schedule_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup_schedule.BackupSchedule: r"""Creates a new backup schedule. @@ -3544,8 +3757,10 @@ def sample_create_backup_schedule(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.BackupSchedule: @@ -3608,7 +3823,7 @@ def get_backup_schedule( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup_schedule.BackupSchedule: r"""Gets backup schedule for the input schedule name. @@ -3653,8 +3868,10 @@ def sample_get_backup_schedule(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.BackupSchedule: @@ -3716,7 +3933,7 @@ def update_backup_schedule( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup_schedule.BackupSchedule: r"""Updates a backup schedule. 
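The new `add_split_points` method above follows the same flattened-parameter pattern as the rest of the client. A hedged usage sketch: the `SplitPoints` sub-fields used here (`table`, `keys`, `Key.key_parts`) come from the public AddSplitPoints API surface rather than this hunk, and all resource names are hypothetical:

```python
from google.cloud import spanner_admin_database_v1
from google.protobuf import struct_pb2


def add_one_split_point() -> None:
    client = spanner_admin_database_v1.DatabaseAdminClient()

    # A single split point on table `Singers`, splitting at primary key ("42",).
    key = spanner_admin_database_v1.types.SplitPoints.Key(
        key_parts=struct_pb2.ListValue(values=[struct_pb2.Value(string_value="42")])
    )
    split = spanner_admin_database_v1.types.SplitPoints(table="Singers", keys=[key])

    response = client.add_split_points(
        database="projects/my-project/instances/my-instance/databases/my-database",
        split_points=[split],
    )
    print(response)
```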
@@ -3774,8 +3991,10 @@ def sample_update_backup_schedule(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.types.BackupSchedule: @@ -3840,7 +4059,7 @@ def delete_backup_schedule( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a backup schedule. @@ -3882,8 +4101,10 @@ def sample_delete_backup_schedule(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -3934,7 +4155,7 @@ def list_backup_schedules( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupSchedulesPager: r"""Lists all the backup schedules for the database. @@ -3981,8 +4202,10 @@ def sample_list_backup_schedules(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesPager: @@ -4066,7 +4289,7 @@ def list_operations( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: r"""Lists operations that match the specified filter in the request. @@ -4077,8 +4300,10 @@ def list_operations( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.ListOperationsResponse: Response message for ``ListOperations`` method. @@ -4102,16 +4327,20 @@ def list_operations( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def get_operation( self, @@ -4119,7 +4348,7 @@ def get_operation( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Gets the latest state of a long-running operation. @@ -4130,8 +4359,10 @@ def get_operation( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: An ``Operation`` object. @@ -4155,16 +4386,20 @@ def get_operation( # Validate the universe domain. self._validate_universe_domain() - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) + try: + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) - # Done; return the response. - return response + # Done; return the response. + return response + except core_exceptions.GoogleAPICallError as e: + self._add_cred_info_for_auth_errors(e) + raise e def delete_operation( self, @@ -4172,7 +4407,7 @@ def delete_operation( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a long-running operation. @@ -4188,8 +4423,10 @@ def delete_operation( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
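With the new try/except wrapping, the operations helpers no longer let a `GoogleAPICallError` propagate untouched: 401/403/404 errors first pass through `_add_cred_info_for_auth_errors`, which appends a JSON description of the active credential to the error details when google-auth >= 2.35.0 exposes `get_cred_info`. A sketch of the observable effect; the operation name is hypothetical:

```python
from google.api_core import exceptions as core_exceptions
from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
try:
    client.get_operation({"name": "projects/p/instances/i/databases/d/operations/op"})
except core_exceptions.GoogleAPICallError as e:
    # On auth-related failures the details list may now end with a JSON string
    # describing the credential type, which helps diagnose permission issues.
    print(e.code, e.details)
```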
Returns: None """ @@ -4226,7 +4463,7 @@ def cancel_operation( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Starts asynchronous cancellation on a long-running operation. @@ -4241,8 +4478,10 @@ def cancel_operation( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: None """ diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py index 0fffae2ba6..fe760684db 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/pagers.py @@ -69,7 +69,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -83,8 +83,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_database_admin.ListDatabasesRequest(request) @@ -143,7 +145,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -157,8 +159,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_database_admin.ListDatabasesRequest(request) @@ -223,7 +227,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -237,8 +241,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = backup.ListBackupsRequest(request) @@ -297,7 +303,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -311,8 +317,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = backup.ListBackupsRequest(request) @@ -375,7 +383,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -389,8 +397,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_database_admin.ListDatabaseOperationsRequest(request) @@ -451,7 +461,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -465,8 +475,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" self._method = method self._request = spanner_database_admin.ListDatabaseOperationsRequest(request) @@ -531,7 +543,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -545,8 +557,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = backup.ListBackupOperationsRequest(request) @@ -605,7 +619,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -619,8 +633,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = backup.ListBackupOperationsRequest(request) @@ -683,7 +699,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -697,8 +713,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_database_admin.ListDatabaseRolesRequest(request) @@ -759,7 +777,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -773,8 +791,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_database_admin.ListDatabaseRolesRequest(request) @@ -839,7 +859,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -853,8 +873,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = backup_schedule.ListBackupSchedulesRequest(request) @@ -913,7 +935,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -927,8 +949,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" self._method = method self._request = backup_schedule.ListBackupSchedulesRequest(request) diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py index cdd10bdcf7..e0c3e7c1d9 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/base.py @@ -383,6 +383,21 @@ def _prep_wrapped_messages(self, client_info): default_timeout=3600.0, client_info=client_info, ), + self.add_split_points: gapic_v1.method.wrap_method( + self.add_split_points, + default_retry=retries.Retry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=3600.0, + ), + default_timeout=3600.0, + client_info=client_info, + ), self.create_backup_schedule: gapic_v1.method.wrap_method( self.create_backup_schedule, default_retry=retries.Retry( @@ -692,6 +707,18 @@ def list_database_roles( ]: raise NotImplementedError() + @property + def add_split_points( + self, + ) -> Callable[ + [spanner_database_admin.AddSplitPointsRequest], + Union[ + spanner_database_admin.AddSplitPointsResponse, + Awaitable[spanner_database_admin.AddSplitPointsResponse], + ], + ]: + raise NotImplementedError() + @property def create_backup_schedule( self, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py index 344b0c8d25..00d7e84672 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.spanner_admin_database_v1.types import backup from google.cloud.spanner_admin_database_v1.types import backup as gsad_backup @@ -38,6 +44,81 @@ from google.protobuf import empty_pb2 # type: ignore from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class DatabaseAdminGrpcTransport(DatabaseAdminTransport): """gRPC backend transport for DatabaseAdmin. @@ -199,7 +280,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages.
This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -263,7 +349,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -290,7 +378,7 @@ def list_databases( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_databases" not in self._stubs: - self._stubs["list_databases"] = self.grpc_channel.unary_unary( + self._stubs["list_databases"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", request_serializer=spanner_database_admin.ListDatabasesRequest.serialize, response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize, @@ -327,7 +415,7 @@ def create_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_database" not in self._stubs: - self._stubs["create_database"] = self.grpc_channel.unary_unary( + self._stubs["create_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -355,7 +443,7 @@ def get_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_database" not in self._stubs: - self._stubs["get_database"] = self.grpc_channel.unary_unary( + self._stubs["get_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", request_serializer=spanner_database_admin.GetDatabaseRequest.serialize, response_deserializer=spanner_database_admin.Database.deserialize, @@ -420,7 +508,7 @@ def update_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_database" not in self._stubs: - self._stubs["update_database"] = self.grpc_channel.unary_unary( + self._stubs["update_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase", request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -456,7 +544,7 @@ def update_database_ddl( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_database_ddl" not in self._stubs: - self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary( + self._stubs["update_database_ddl"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -485,7 +573,7 @@ def drop_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "drop_database" not in self._stubs: - self._stubs["drop_database"] = self.grpc_channel.unary_unary( + self._stubs["drop_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", request_serializer=spanner_database_admin.DropDatabaseRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -517,7 +605,7 @@ def get_database_ddl( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_database_ddl" not in self._stubs: - self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary( + self._stubs["get_database_ddl"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize, response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize, @@ -551,7 +639,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -586,7 +674,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -624,7 +712,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -662,7 +750,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", request_serializer=gsad_backup.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -700,7 +788,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup", request_serializer=backup.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -725,7 +813,7 @@ def get_backup(self) -> Callable[[backup.GetBackupRequest], backup.Backup]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", request_serializer=backup.GetBackupRequest.serialize, response_deserializer=backup.Backup.deserialize, @@ -752,7 +840,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", request_serializer=gsad_backup.UpdateBackupRequest.serialize, response_deserializer=gsad_backup.Backup.deserialize, @@ -777,7 +865,7 @@ def delete_backup(self) -> Callable[[backup.DeleteBackupRequest], empty_pb2.Empt # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", request_serializer=backup.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -805,7 +893,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", request_serializer=backup.ListBackupsRequest.serialize, response_deserializer=backup.ListBackupsResponse.deserialize, @@ -851,7 +939,7 @@ def restore_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_database" not in self._stubs: - self._stubs["restore_database"] = self.grpc_channel.unary_unary( + self._stubs["restore_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -889,7 +977,7 @@ def list_database_operations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_database_operations" not in self._stubs: - self._stubs["list_database_operations"] = self.grpc_channel.unary_unary( + self._stubs["list_database_operations"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize, response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize, @@ -928,7 +1016,7 @@ def list_backup_operations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backup_operations" not in self._stubs: - self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary( + self._stubs["list_backup_operations"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", request_serializer=backup.ListBackupOperationsRequest.serialize, response_deserializer=backup.ListBackupOperationsResponse.deserialize, @@ -957,13 +1045,43 @@ def list_database_roles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_database_roles" not in self._stubs: - self._stubs["list_database_roles"] = self.grpc_channel.unary_unary( + self._stubs["list_database_roles"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles", request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize, response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize, ) return self._stubs["list_database_roles"] + @property + def add_split_points( + self, + ) -> Callable[ + [spanner_database_admin.AddSplitPointsRequest], + spanner_database_admin.AddSplitPointsResponse, + ]: + r"""Return a callable for the add split points method over gRPC. + + Adds split points to specified tables, indexes of a + database. + + Returns: + Callable[[~.AddSplitPointsRequest], + ~.AddSplitPointsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "add_split_points" not in self._stubs: + self._stubs["add_split_points"] = self._logged_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints", + request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize, + response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize, + ) + return self._stubs["add_split_points"] + @property def create_backup_schedule( self, @@ -986,7 +1104,7 @@ def create_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup_schedule" not in self._stubs: - self._stubs["create_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize, response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize, @@ -1014,7 +1132,7 @@ def get_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_backup_schedule" not in self._stubs: - self._stubs["get_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", request_serializer=backup_schedule.GetBackupScheduleRequest.serialize, response_deserializer=backup_schedule.BackupSchedule.deserialize, @@ -1043,7 +1161,7 @@ def update_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup_schedule" not in self._stubs: - self._stubs["update_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize, response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize, @@ -1069,7 +1187,7 @@ def delete_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup_schedule" not in self._stubs: - self._stubs["delete_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1098,7 +1216,7 @@ def list_backup_schedules( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backup_schedules" not in self._stubs: - self._stubs["list_backup_schedules"] = self.grpc_channel.unary_unary( + self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize, response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize, @@ -1106,7 +1224,7 @@ def list_backup_schedules( return self._stubs["list_backup_schedules"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def delete_operation( @@ -1118,7 +1236,7 @@ def delete_operation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_operation" not in self._stubs: - self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + self._stubs["delete_operation"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/DeleteOperation", request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, response_deserializer=None, @@ -1135,7 +1253,7 @@ def cancel_operation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_operation" not in self._stubs: - self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/CancelOperation", request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, response_deserializer=None, @@ -1152,7 +1270,7 @@ def get_operation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_operation" not in self._stubs: - self._stubs["get_operation"] = self.grpc_channel.unary_unary( + self._stubs["get_operation"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/GetOperation", request_serializer=operations_pb2.GetOperationRequest.SerializeToString, response_deserializer=operations_pb2.Operation.FromString, @@ -1171,7 +1289,7 @@ def list_operations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_operations" not in self._stubs: - self._stubs["list_operations"] = self.grpc_channel.unary_unary( + self._stubs["list_operations"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/ListOperations", request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, response_deserializer=operations_pb2.ListOperationsResponse.FromString, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py index de06a1d16a..624bc2d25b 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. # import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -24,8 +27,11 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.spanner_admin_database_v1.types import backup @@ -42,6 +48,82 @@ from .base import DatabaseAdminTransport, DEFAULT_CLIENT_INFO from .grpc import DatabaseAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await 
continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `<class 'grpc.aio._metadata.Metadata'>` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class DatabaseAdminGrpcAsyncIOTransport(DatabaseAdminTransport): """gRPC AsyncIO backend transport for DatabaseAdmin. @@ -246,10 +328,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -272,7 +357,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -300,7 +385,7 @@ def list_databases( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_databases" not in self._stubs: - self._stubs["list_databases"] = self.grpc_channel.unary_unary( + self._stubs["list_databases"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", request_serializer=spanner_database_admin.ListDatabasesRequest.serialize, response_deserializer=spanner_database_admin.ListDatabasesResponse.deserialize, @@ -338,7 +423,7 @@ def create_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_database" not in self._stubs: - self._stubs["create_database"] = self.grpc_channel.unary_unary( + self._stubs["create_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", request_serializer=spanner_database_admin.CreateDatabaseRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -367,7 +452,7 @@ def get_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each.
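# Editor's note: the interceptor above is inert unless google.api_core ships
# `client_logging` AND this module's logger is DEBUG-enabled. A minimal
# stdlib-only way to switch it on (child loggers defer to the package logger
# through the logging hierarchy):
import logging

logging.basicConfig(level=logging.WARNING)  # keep unrelated loggers quiet
logging.getLogger("google.cloud.spanner_admin_database_v1").setLevel(logging.DEBUG)
# Recent google-api-core releases can alternatively drive this via the
# GOOGLE_SDK_PYTHON_LOGGING_SCOPE environment variable (see
# google.api_core.client_logging).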
if "get_database" not in self._stubs: - self._stubs["get_database"] = self.grpc_channel.unary_unary( + self._stubs["get_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", request_serializer=spanner_database_admin.GetDatabaseRequest.serialize, response_deserializer=spanner_database_admin.Database.deserialize, @@ -433,7 +518,7 @@ def update_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_database" not in self._stubs: - self._stubs["update_database"] = self.grpc_channel.unary_unary( + self._stubs["update_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase", request_serializer=spanner_database_admin.UpdateDatabaseRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -470,7 +555,7 @@ def update_database_ddl( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_database_ddl" not in self._stubs: - self._stubs["update_database_ddl"] = self.grpc_channel.unary_unary( + self._stubs["update_database_ddl"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", request_serializer=spanner_database_admin.UpdateDatabaseDdlRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -501,7 +586,7 @@ def drop_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "drop_database" not in self._stubs: - self._stubs["drop_database"] = self.grpc_channel.unary_unary( + self._stubs["drop_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", request_serializer=spanner_database_admin.DropDatabaseRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -533,7 +618,7 @@ def get_database_ddl( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_database_ddl" not in self._stubs: - self._stubs["get_database_ddl"] = self.grpc_channel.unary_unary( + self._stubs["get_database_ddl"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", request_serializer=spanner_database_admin.GetDatabaseDdlRequest.serialize, response_deserializer=spanner_database_admin.GetDatabaseDdlResponse.deserialize, @@ -567,7 +652,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -602,7 +687,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -640,7 +725,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -680,7 +765,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", request_serializer=gsad_backup.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -718,7 +803,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup", request_serializer=backup.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -745,7 +830,7 @@ def get_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", request_serializer=backup.GetBackupRequest.serialize, response_deserializer=backup.Backup.deserialize, @@ -772,7 +857,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", request_serializer=gsad_backup.UpdateBackupRequest.serialize, response_deserializer=gsad_backup.Backup.deserialize, @@ -799,7 +884,7 @@ def delete_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", request_serializer=backup.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -827,7 +912,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", request_serializer=backup.ListBackupsRequest.serialize, response_deserializer=backup.ListBackupsResponse.deserialize, @@ -874,7 +959,7 @@ def restore_database( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_database" not in self._stubs: - self._stubs["restore_database"] = self.grpc_channel.unary_unary( + self._stubs["restore_database"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", request_serializer=spanner_database_admin.RestoreDatabaseRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -912,7 +997,7 @@ def list_database_operations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_database_operations" not in self._stubs: - self._stubs["list_database_operations"] = self.grpc_channel.unary_unary( + self._stubs["list_database_operations"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", request_serializer=spanner_database_admin.ListDatabaseOperationsRequest.serialize, response_deserializer=spanner_database_admin.ListDatabaseOperationsResponse.deserialize, @@ -952,7 +1037,7 @@ def list_backup_operations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backup_operations" not in self._stubs: - self._stubs["list_backup_operations"] = self.grpc_channel.unary_unary( + self._stubs["list_backup_operations"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", request_serializer=backup.ListBackupOperationsRequest.serialize, response_deserializer=backup.ListBackupOperationsResponse.deserialize, @@ -981,13 +1066,43 @@ def list_database_roles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_database_roles" not in self._stubs: - self._stubs["list_database_roles"] = self.grpc_channel.unary_unary( + self._stubs["list_database_roles"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles", request_serializer=spanner_database_admin.ListDatabaseRolesRequest.serialize, response_deserializer=spanner_database_admin.ListDatabaseRolesResponse.deserialize, ) return self._stubs["list_database_roles"] + @property + def add_split_points( + self, + ) -> Callable[ + [spanner_database_admin.AddSplitPointsRequest], + Awaitable[spanner_database_admin.AddSplitPointsResponse], + ]: + r"""Return a callable for the add split points method over gRPC. + + Adds split points to specified tables, indexes of a + database. + + Returns: + Callable[[~.AddSplitPointsRequest], + Awaitable[~.AddSplitPointsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "add_split_points" not in self._stubs: + self._stubs["add_split_points"] = self._logged_channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/AddSplitPoints", + request_serializer=spanner_database_admin.AddSplitPointsRequest.serialize, + response_deserializer=spanner_database_admin.AddSplitPointsResponse.deserialize, + ) + return self._stubs["add_split_points"] + @property def create_backup_schedule( self, @@ -1010,7 +1125,7 @@ def create_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup_schedule" not in self._stubs: - self._stubs["create_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["create_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", request_serializer=gsad_backup_schedule.CreateBackupScheduleRequest.serialize, response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize, @@ -1039,7 +1154,7 @@ def get_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_backup_schedule" not in self._stubs: - self._stubs["get_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["get_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", request_serializer=backup_schedule.GetBackupScheduleRequest.serialize, response_deserializer=backup_schedule.BackupSchedule.deserialize, @@ -1068,7 +1183,7 @@ def update_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup_schedule" not in self._stubs: - self._stubs["update_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["update_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", request_serializer=gsad_backup_schedule.UpdateBackupScheduleRequest.serialize, response_deserializer=gsad_backup_schedule.BackupSchedule.deserialize, @@ -1096,7 +1211,7 @@ def delete_backup_schedule( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup_schedule" not in self._stubs: - self._stubs["delete_backup_schedule"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup_schedule"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", request_serializer=backup_schedule.DeleteBackupScheduleRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1125,7 +1240,7 @@ def list_backup_schedules( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_backup_schedules" not in self._stubs: - self._stubs["list_backup_schedules"] = self.grpc_channel.unary_unary( + self._stubs["list_backup_schedules"] = self._logged_channel.unary_unary( "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", request_serializer=backup_schedule.ListBackupSchedulesRequest.serialize, response_deserializer=backup_schedule.ListBackupSchedulesResponse.deserialize, @@ -1375,6 +1490,21 @@ def _prep_wrapped_messages(self, client_info): default_timeout=3600.0, client_info=client_info, ), + self.add_split_points: self._wrap_method( + self.add_split_points, + default_retry=retries.AsyncRetry( + initial=1.0, + maximum=32.0, + multiplier=1.3, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=3600.0, + ), + default_timeout=3600.0, + client_info=client_info, + ), self.create_backup_schedule: self._wrap_method( self.create_backup_schedule, default_retry=retries.AsyncRetry( @@ -1478,7 +1608,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: @@ -1494,7 +1624,7 @@ def delete_operation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_operation" not in self._stubs: - self._stubs["delete_operation"] = self.grpc_channel.unary_unary( + self._stubs["delete_operation"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/DeleteOperation", request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString, response_deserializer=None, @@ -1511,7 +1641,7 @@ def cancel_operation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "cancel_operation" not in self._stubs: - self._stubs["cancel_operation"] = self.grpc_channel.unary_unary( + self._stubs["cancel_operation"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/CancelOperation", request_serializer=operations_pb2.CancelOperationRequest.SerializeToString, response_deserializer=None, @@ -1528,7 +1658,7 @@ def get_operation( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_operation" not in self._stubs: - self._stubs["get_operation"] = self.grpc_channel.unary_unary( + self._stubs["get_operation"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/GetOperation", request_serializer=operations_pb2.GetOperationRequest.SerializeToString, response_deserializer=operations_pb2.Operation.FromString, @@ -1547,7 +1677,7 @@ def list_operations( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_operations" not in self._stubs: - self._stubs["list_operations"] = self.grpc_channel.unary_unary( + self._stubs["list_operations"] = self._logged_channel.unary_unary( "/google.longrunning.Operations/ListOperations", request_serializer=operations_pb2.ListOperationsRequest.SerializeToString, response_deserializer=operations_pb2.ListOperationsResponse.FromString, diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py index e88a8fa080..30adfa8b07 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -53,6 +54,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -76,6 +85,14 @@ class DatabaseAdminRestInterceptor: .. code-block:: python class MyCustomDatabaseAdminInterceptor(DatabaseAdminRestInterceptor): + def pre_add_split_points(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_add_split_points(self, response): + logging.log(f"Received response: {response}") + return response + def pre_copy_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -270,9 +287,63 @@ def post_update_database_ddl(self, response): """ + def pre_add_split_points( + self, + request: spanner_database_admin.AddSplitPointsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.AddSplitPointsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Pre-rpc interceptor for add_split_points + + Override in a subclass to manipulate the request or metadata + before they are sent to the DatabaseAdmin server. + """ + return request, metadata + + def post_add_split_points( + self, response: spanner_database_admin.AddSplitPointsResponse + ) -> spanner_database_admin.AddSplitPointsResponse: + """Post-rpc interceptor for add_split_points + + DEPRECATED. Please use the `post_add_split_points_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response + after it is returned by the DatabaseAdmin server but before + it is returned to user code. This `post_add_split_points` interceptor runs + before the `post_add_split_points_with_metadata` interceptor. 
+ """ + return response + + def post_add_split_points_with_metadata( + self, + response: spanner_database_admin.AddSplitPointsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.AddSplitPointsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for add_split_points + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_add_split_points_with_metadata` + interceptor in new development instead of the `post_add_split_points` interceptor. + When both interceptors are used, this `post_add_split_points_with_metadata` interceptor runs after the + `post_add_split_points` interceptor. The (possibly modified) response returned by + `post_add_split_points` will be passed to + `post_add_split_points_with_metadata`. + """ + return response, metadata + def pre_copy_backup( - self, request: backup.CopyBackupRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[backup.CopyBackupRequest, Sequence[Tuple[str, str]]]: + self, + request: backup.CopyBackupRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[backup.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for copy_backup Override in a subclass to manipulate the request or metadata @@ -285,17 +356,42 @@ def post_copy_backup( ) -> operations_pb2.Operation: """Post-rpc interceptor for copy_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_copy_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_copy_backup` interceptor runs + before the `post_copy_backup_with_metadata` interceptor. """ return response + def post_copy_backup_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for copy_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_copy_backup_with_metadata` + interceptor in new development instead of the `post_copy_backup` interceptor. + When both interceptors are used, this `post_copy_backup_with_metadata` interceptor runs after the + `post_copy_backup` interceptor. The (possibly modified) response returned by + `post_copy_backup` will be passed to + `post_copy_backup_with_metadata`. + """ + return response, metadata + def pre_create_backup( self, request: gsad_backup.CreateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[gsad_backup.CreateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gsad_backup.CreateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for create_backup Override in a subclass to manipulate the request or metadata @@ -308,18 +404,42 @@ def post_create_backup( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_backup_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_backup` interceptor runs + before the `post_create_backup_with_metadata` interceptor. """ return response + def post_create_backup_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_create_backup_with_metadata` + interceptor in new development instead of the `post_create_backup` interceptor. + When both interceptors are used, this `post_create_backup_with_metadata` interceptor runs after the + `post_create_backup` interceptor. The (possibly modified) response returned by + `post_create_backup` will be passed to + `post_create_backup_with_metadata`. + """ + return response, metadata + def pre_create_backup_schedule( self, request: gsad_backup_schedule.CreateBackupScheduleRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - gsad_backup_schedule.CreateBackupScheduleRequest, Sequence[Tuple[str, str]] + gsad_backup_schedule.CreateBackupScheduleRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_backup_schedule @@ -333,17 +453,45 @@ def post_create_backup_schedule( ) -> gsad_backup_schedule.BackupSchedule: """Post-rpc interceptor for create_backup_schedule - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_backup_schedule_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_backup_schedule` interceptor runs + before the `post_create_backup_schedule_with_metadata` interceptor. """ return response + def post_create_backup_schedule_with_metadata( + self, + response: gsad_backup_schedule.BackupSchedule, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for create_backup_schedule + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_create_backup_schedule_with_metadata` + interceptor in new development instead of the `post_create_backup_schedule` interceptor. + When both interceptors are used, this `post_create_backup_schedule_with_metadata` interceptor runs after the + `post_create_backup_schedule` interceptor. The (possibly modified) response returned by + `post_create_backup_schedule` will be passed to + `post_create_backup_schedule_with_metadata`. 
+ """ + return response, metadata + def pre_create_database( self, request: spanner_database_admin.CreateDatabaseRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_database_admin.CreateDatabaseRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.CreateDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_database Override in a subclass to manipulate the request or metadata @@ -356,15 +504,40 @@ def post_create_database( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_database - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_database` interceptor runs + before the `post_create_database_with_metadata` interceptor. """ return response + def post_create_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_create_database_with_metadata` + interceptor in new development instead of the `post_create_database` interceptor. + When both interceptors are used, this `post_create_database_with_metadata` interceptor runs after the + `post_create_database` interceptor. The (possibly modified) response returned by + `post_create_database` will be passed to + `post_create_database_with_metadata`. 
+ """ + return response, metadata + def pre_delete_backup( - self, request: backup.DeleteBackupRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[backup.DeleteBackupRequest, Sequence[Tuple[str, str]]]: + self, + request: backup.DeleteBackupRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[backup.DeleteBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for delete_backup Override in a subclass to manipulate the request or metadata @@ -375,8 +548,11 @@ def pre_delete_backup( def pre_delete_backup_schedule( self, request: backup_schedule.DeleteBackupScheduleRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[backup_schedule.DeleteBackupScheduleRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + backup_schedule.DeleteBackupScheduleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_backup_schedule Override in a subclass to manipulate the request or metadata @@ -387,8 +563,11 @@ def pre_delete_backup_schedule( def pre_drop_database( self, request: spanner_database_admin.DropDatabaseRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_database_admin.DropDatabaseRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.DropDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for drop_database Override in a subclass to manipulate the request or metadata @@ -397,8 +576,10 @@ def pre_drop_database( return request, metadata def pre_get_backup( - self, request: backup.GetBackupRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[backup.GetBackupRequest, Sequence[Tuple[str, str]]]: + self, + request: backup.GetBackupRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[backup.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_backup Override in a subclass to manipulate the request or metadata @@ -409,17 +590,41 @@ def pre_get_backup( def post_get_backup(self, response: backup.Backup) -> backup.Backup: """Post-rpc interceptor for get_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_backup` interceptor runs + before the `post_get_backup_with_metadata` interceptor. """ return response + def post_get_backup_with_metadata( + self, response: backup.Backup, metadata: Sequence[Tuple[str, Union[str, bytes]]] + ) -> Tuple[backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_get_backup_with_metadata` + interceptor in new development instead of the `post_get_backup` interceptor. + When both interceptors are used, this `post_get_backup_with_metadata` interceptor runs after the + `post_get_backup` interceptor. The (possibly modified) response returned by + `post_get_backup` will be passed to + `post_get_backup_with_metadata`. 
+ """ + return response, metadata + def pre_get_backup_schedule( self, request: backup_schedule.GetBackupScheduleRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[backup_schedule.GetBackupScheduleRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + backup_schedule.GetBackupScheduleRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_backup_schedule Override in a subclass to manipulate the request or metadata @@ -432,17 +637,43 @@ def post_get_backup_schedule( ) -> backup_schedule.BackupSchedule: """Post-rpc interceptor for get_backup_schedule - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_backup_schedule_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_backup_schedule` interceptor runs + before the `post_get_backup_schedule_with_metadata` interceptor. """ return response + def post_get_backup_schedule_with_metadata( + self, + response: backup_schedule.BackupSchedule, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_backup_schedule + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_get_backup_schedule_with_metadata` + interceptor in new development instead of the `post_get_backup_schedule` interceptor. + When both interceptors are used, this `post_get_backup_schedule_with_metadata` interceptor runs after the + `post_get_backup_schedule` interceptor. The (possibly modified) response returned by + `post_get_backup_schedule` will be passed to + `post_get_backup_schedule_with_metadata`. + """ + return response, metadata + def pre_get_database( self, request: spanner_database_admin.GetDatabaseRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_database_admin.GetDatabaseRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.GetDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_database Override in a subclass to manipulate the request or metadata @@ -455,17 +686,45 @@ def post_get_database( ) -> spanner_database_admin.Database: """Post-rpc interceptor for get_database - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_database` interceptor runs + before the `post_get_database_with_metadata` interceptor. """ return response + def post_get_database_with_metadata( + self, + response: spanner_database_admin.Database, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.Database, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_get_database_with_metadata` + interceptor in new development instead of the `post_get_database` interceptor. + When both interceptors are used, this `post_get_database_with_metadata` interceptor runs after the + `post_get_database` interceptor. The (possibly modified) response returned by + `post_get_database` will be passed to + `post_get_database_with_metadata`. + """ + return response, metadata + def pre_get_database_ddl( self, request: spanner_database_admin.GetDatabaseDdlRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_database_admin.GetDatabaseDdlRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.GetDatabaseDdlRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_database_ddl Override in a subclass to manipulate the request or metadata @@ -478,17 +737,45 @@ def post_get_database_ddl( ) -> spanner_database_admin.GetDatabaseDdlResponse: """Post-rpc interceptor for get_database_ddl - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_database_ddl_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_database_ddl` interceptor runs + before the `post_get_database_ddl_with_metadata` interceptor. """ return response + def post_get_database_ddl_with_metadata( + self, + response: spanner_database_admin.GetDatabaseDdlResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.GetDatabaseDdlResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_database_ddl + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_get_database_ddl_with_metadata` + interceptor in new development instead of the `post_get_database_ddl` interceptor. + When both interceptors are used, this `post_get_database_ddl_with_metadata` interceptor runs after the + `post_get_database_ddl` interceptor. The (possibly modified) response returned by + `post_get_database_ddl` will be passed to + `post_get_database_ddl_with_metadata`. + """ + return response, metadata + def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -499,17 +786,42 @@ def pre_get_iam_policy( def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for get_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_iam_policy` interceptor runs + before the `post_get_iam_policy_with_metadata` interceptor. 
""" return response + def post_get_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_get_iam_policy_with_metadata` + interceptor in new development instead of the `post_get_iam_policy` interceptor. + When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the + `post_get_iam_policy` interceptor. The (possibly modified) response returned by + `post_get_iam_policy` will be passed to + `post_get_iam_policy_with_metadata`. + """ + return response, metadata + def pre_list_backup_operations( self, request: backup.ListBackupOperationsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[backup.ListBackupOperationsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + backup.ListBackupOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_backup_operations Override in a subclass to manipulate the request or metadata @@ -522,15 +834,42 @@ def post_list_backup_operations( ) -> backup.ListBackupOperationsResponse: """Post-rpc interceptor for list_backup_operations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_backup_operations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_backup_operations` interceptor runs + before the `post_list_backup_operations_with_metadata` interceptor. """ return response + def post_list_backup_operations_with_metadata( + self, + response: backup.ListBackupOperationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + backup.ListBackupOperationsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for list_backup_operations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_list_backup_operations_with_metadata` + interceptor in new development instead of the `post_list_backup_operations` interceptor. + When both interceptors are used, this `post_list_backup_operations_with_metadata` interceptor runs after the + `post_list_backup_operations` interceptor. The (possibly modified) response returned by + `post_list_backup_operations` will be passed to + `post_list_backup_operations_with_metadata`. 
+ """ + return response, metadata + def pre_list_backups( - self, request: backup.ListBackupsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[backup.ListBackupsRequest, Sequence[Tuple[str, str]]]: + self, + request: backup.ListBackupsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[backup.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_backups Override in a subclass to manipulate the request or metadata @@ -543,17 +882,43 @@ def post_list_backups( ) -> backup.ListBackupsResponse: """Post-rpc interceptor for list_backups - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_backups_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_backups` interceptor runs + before the `post_list_backups_with_metadata` interceptor. """ return response + def post_list_backups_with_metadata( + self, + response: backup.ListBackupsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[backup.ListBackupsResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for list_backups + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_list_backups_with_metadata` + interceptor in new development instead of the `post_list_backups` interceptor. + When both interceptors are used, this `post_list_backups_with_metadata` interceptor runs after the + `post_list_backups` interceptor. The (possibly modified) response returned by + `post_list_backups` will be passed to + `post_list_backups_with_metadata`. + """ + return response, metadata + def pre_list_backup_schedules( self, request: backup_schedule.ListBackupSchedulesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[backup_schedule.ListBackupSchedulesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + backup_schedule.ListBackupSchedulesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_backup_schedules Override in a subclass to manipulate the request or metadata @@ -566,18 +931,45 @@ def post_list_backup_schedules( ) -> backup_schedule.ListBackupSchedulesResponse: """Post-rpc interceptor for list_backup_schedules - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_backup_schedules_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_backup_schedules` interceptor runs + before the `post_list_backup_schedules_with_metadata` interceptor. """ return response + def post_list_backup_schedules_with_metadata( + self, + response: backup_schedule.ListBackupSchedulesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + backup_schedule.ListBackupSchedulesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_backup_schedules + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_list_backup_schedules_with_metadata` + interceptor in new development instead of the `post_list_backup_schedules` interceptor. + When both interceptors are used, this `post_list_backup_schedules_with_metadata` interceptor runs after the + `post_list_backup_schedules` interceptor. The (possibly modified) response returned by + `post_list_backup_schedules` will be passed to + `post_list_backup_schedules_with_metadata`. + """ + return response, metadata + def pre_list_database_operations( self, request: spanner_database_admin.ListDatabaseOperationsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_database_admin.ListDatabaseOperationsRequest, Sequence[Tuple[str, str]] + spanner_database_admin.ListDatabaseOperationsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_database_operations @@ -591,18 +983,45 @@ def post_list_database_operations( ) -> spanner_database_admin.ListDatabaseOperationsResponse: """Post-rpc interceptor for list_database_operations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_database_operations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_database_operations` interceptor runs + before the `post_list_database_operations_with_metadata` interceptor. """ return response + def post_list_database_operations_with_metadata( + self, + response: spanner_database_admin.ListDatabaseOperationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.ListDatabaseOperationsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_database_operations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_list_database_operations_with_metadata` + interceptor in new development instead of the `post_list_database_operations` interceptor. + When both interceptors are used, this `post_list_database_operations_with_metadata` interceptor runs after the + `post_list_database_operations` interceptor. The (possibly modified) response returned by + `post_list_database_operations` will be passed to + `post_list_database_operations_with_metadata`. + """ + return response, metadata + def pre_list_database_roles( self, request: spanner_database_admin.ListDatabaseRolesRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_database_admin.ListDatabaseRolesRequest, Sequence[Tuple[str, str]] + spanner_database_admin.ListDatabaseRolesRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_database_roles @@ -616,17 +1035,46 @@ def post_list_database_roles( ) -> spanner_database_admin.ListDatabaseRolesResponse: """Post-rpc interceptor for list_database_roles - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_database_roles_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. 
This `post_list_database_roles` interceptor runs + before the `post_list_database_roles_with_metadata` interceptor. """ return response + def post_list_database_roles_with_metadata( + self, + response: spanner_database_admin.ListDatabaseRolesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.ListDatabaseRolesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_database_roles + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_list_database_roles_with_metadata` + interceptor in new development instead of the `post_list_database_roles` interceptor. + When both interceptors are used, this `post_list_database_roles_with_metadata` interceptor runs after the + `post_list_database_roles` interceptor. The (possibly modified) response returned by + `post_list_database_roles` will be passed to + `post_list_database_roles_with_metadata`. + """ + return response, metadata + def pre_list_databases( self, request: spanner_database_admin.ListDatabasesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_database_admin.ListDatabasesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.ListDatabasesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_databases Override in a subclass to manipulate the request or metadata @@ -639,18 +1087,45 @@ def post_list_databases( ) -> spanner_database_admin.ListDatabasesResponse: """Post-rpc interceptor for list_databases - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_databases_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_databases` interceptor runs + before the `post_list_databases_with_metadata` interceptor. """ return response + def post_list_databases_with_metadata( + self, + response: spanner_database_admin.ListDatabasesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.ListDatabasesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_databases + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_list_databases_with_metadata` + interceptor in new development instead of the `post_list_databases` interceptor. + When both interceptors are used, this `post_list_databases_with_metadata` interceptor runs after the + `post_list_databases` interceptor. The (possibly modified) response returned by + `post_list_databases` will be passed to + `post_list_databases_with_metadata`. 
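
A compact sketch of that ordering with both hooks overridden (the subclass name is hypothetical):

    from google.cloud.spanner_admin_database_v1.services.database_admin.transports.rest import (
        DatabaseAdminRestInterceptor,
    )


    class OrderedInterceptor(DatabaseAdminRestInterceptor):
        def post_list_databases(self, response):
            # Runs first (deprecated hook); whatever it returns is what the
            # metadata-aware hook below receives.
            return response

        def post_list_databases_with_metadata(self, response, metadata):
            # Runs second, after post_list_databases.
            return response, metadata
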
+ """ + return response, metadata + def pre_restore_database( self, request: spanner_database_admin.RestoreDatabaseRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_database_admin.RestoreDatabaseRequest, Sequence[Tuple[str, str]] + spanner_database_admin.RestoreDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for restore_database @@ -664,17 +1139,42 @@ def post_restore_database( ) -> operations_pb2.Operation: """Post-rpc interceptor for restore_database - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_restore_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_restore_database` interceptor runs + before the `post_restore_database_with_metadata` interceptor. """ return response + def post_restore_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for restore_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_restore_database_with_metadata` + interceptor in new development instead of the `post_restore_database` interceptor. + When both interceptors are used, this `post_restore_database_with_metadata` interceptor runs after the + `post_restore_database` interceptor. The (possibly modified) response returned by + `post_restore_database` will be passed to + `post_restore_database_with_metadata`. + """ + return response, metadata + def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -685,17 +1185,43 @@ def pre_set_iam_policy( def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for set_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_set_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_set_iam_policy` interceptor runs + before the `post_set_iam_policy_with_metadata` interceptor. """ return response + def post_set_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_set_iam_policy_with_metadata` + interceptor in new development instead of the `post_set_iam_policy` interceptor. + When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the + `post_set_iam_policy` interceptor. The (possibly modified) response returned by + `post_set_iam_policy` will be passed to + `post_set_iam_policy_with_metadata`. + """ + return response, metadata + def pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -708,17 +1234,45 @@ def post_test_iam_permissions( ) -> iam_policy_pb2.TestIamPermissionsResponse: """Post-rpc interceptor for test_iam_permissions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_test_iam_permissions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_test_iam_permissions` interceptor runs + before the `post_test_iam_permissions_with_metadata` interceptor. """ return response + def post_test_iam_permissions_with_metadata( + self, + response: iam_policy_pb2.TestIamPermissionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_test_iam_permissions_with_metadata` + interceptor in new development instead of the `post_test_iam_permissions` interceptor. + When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the + `post_test_iam_permissions` interceptor. The (possibly modified) response returned by + `post_test_iam_permissions` will be passed to + `post_test_iam_permissions_with_metadata`. + """ + return response, metadata + def pre_update_backup( self, request: gsad_backup.UpdateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[gsad_backup.UpdateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gsad_backup.UpdateBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for update_backup Override in a subclass to manipulate the request or metadata @@ -729,18 +1283,42 @@ def pre_update_backup( def post_update_backup(self, response: gsad_backup.Backup) -> gsad_backup.Backup: """Post-rpc interceptor for update_backup - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_backup_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. 
This `post_update_backup` interceptor runs + before the `post_update_backup_with_metadata` interceptor. """ return response + def post_update_backup_with_metadata( + self, + response: gsad_backup.Backup, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[gsad_backup.Backup, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_backup + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_update_backup_with_metadata` + interceptor in new development instead of the `post_update_backup` interceptor. + When both interceptors are used, this `post_update_backup_with_metadata` interceptor runs after the + `post_update_backup` interceptor. The (possibly modified) response returned by + `post_update_backup` will be passed to + `post_update_backup_with_metadata`. + """ + return response, metadata + def pre_update_backup_schedule( self, request: gsad_backup_schedule.UpdateBackupScheduleRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - gsad_backup_schedule.UpdateBackupScheduleRequest, Sequence[Tuple[str, str]] + gsad_backup_schedule.UpdateBackupScheduleRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_backup_schedule @@ -754,17 +1332,45 @@ def post_update_backup_schedule( ) -> gsad_backup_schedule.BackupSchedule: """Post-rpc interceptor for update_backup_schedule - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_backup_schedule_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_backup_schedule` interceptor runs + before the `post_update_backup_schedule_with_metadata` interceptor. """ return response + def post_update_backup_schedule_with_metadata( + self, + response: gsad_backup_schedule.BackupSchedule, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + gsad_backup_schedule.BackupSchedule, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for update_backup_schedule + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_update_backup_schedule_with_metadata` + interceptor in new development instead of the `post_update_backup_schedule` interceptor. + When both interceptors are used, this `post_update_backup_schedule_with_metadata` interceptor runs after the + `post_update_backup_schedule` interceptor. The (possibly modified) response returned by + `post_update_backup_schedule` will be passed to + `post_update_backup_schedule_with_metadata`. 
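
To take effect, a subclass like the ones sketched above has to be handed to the REST transport. The wiring below follows the usual GAPIC convention; the exact constructor call is an assumption based on that convention rather than something shown in this diff:

    from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient
    from google.cloud.spanner_admin_database_v1.services.database_admin.transports.rest import (
        DatabaseAdminRestInterceptor,
        DatabaseAdminRestTransport,
    )


    class MyInterceptor(DatabaseAdminRestInterceptor):
        pass  # override only the pre_/post_ hooks you need


    client = DatabaseAdminClient(
        transport=DatabaseAdminRestTransport(interceptor=MyInterceptor())
    )
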
+ """ + return response, metadata + def pre_update_database( self, request: spanner_database_admin.UpdateDatabaseRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_database_admin.UpdateDatabaseRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_database_admin.UpdateDatabaseRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for update_database Override in a subclass to manipulate the request or metadata @@ -777,18 +1383,42 @@ def post_update_database( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_database - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_database_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_database` interceptor runs + before the `post_update_database_with_metadata` interceptor. """ return response + def post_update_database_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_database + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. + + We recommend only using this `post_update_database_with_metadata` + interceptor in new development instead of the `post_update_database` interceptor. + When both interceptors are used, this `post_update_database_with_metadata` interceptor runs after the + `post_update_database` interceptor. The (possibly modified) response returned by + `post_update_database` will be passed to + `post_update_database_with_metadata`. + """ + return response, metadata + def pre_update_database_ddl( self, request: spanner_database_admin.UpdateDatabaseDdlRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_database_admin.UpdateDatabaseDdlRequest, Sequence[Tuple[str, str]] + spanner_database_admin.UpdateDatabaseDdlRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_database_ddl @@ -802,17 +1432,42 @@ def post_update_database_ddl( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_database_ddl - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_database_ddl_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the DatabaseAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_database_ddl` interceptor runs + before the `post_update_database_ddl_with_metadata` interceptor. """ return response + def post_update_database_ddl_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_database_ddl + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the DatabaseAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_update_database_ddl_with_metadata` + interceptor in new development instead of the `post_update_database_ddl` interceptor. + When both interceptors are used, this `post_update_database_ddl_with_metadata` interceptor runs after the + `post_update_database_ddl` interceptor. The (possibly modified) response returned by + `post_update_database_ddl` will be passed to + `post_update_database_ddl_with_metadata`. + """ + return response, metadata + def pre_cancel_operation( self, request: operations_pb2.CancelOperationRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[operations_pb2.CancelOperationRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.CancelOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for cancel_operation Override in a subclass to manipulate the request or metadata @@ -832,8 +1487,10 @@ def post_cancel_operation(self, response: None) -> None: def pre_delete_operation( self, request: operations_pb2.DeleteOperationRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.DeleteOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for delete_operation Override in a subclass to manipulate the request or metadata @@ -853,8 +1510,10 @@ def post_delete_operation(self, response: None) -> None: def pre_get_operation( self, request: operations_pb2.GetOperationRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.GetOperationRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_operation Override in a subclass to manipulate the request or metadata @@ -876,8 +1535,10 @@ def post_get_operation( def pre_list_operations( self, request: operations_pb2.ListOperationsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[operations_pb2.ListOperationsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + operations_pb2.ListOperationsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_operations Override in a subclass to manipulate the request or metadata @@ -1072,24 +1733,181 @@ def operations_client(self) -> operations_v1.AbstractOperationsClient: "method": "get", "uri": "/v1/{name=projects/*/instanceConfigs/*/operations}", }, - ], - } + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v1", + ) + + self._operations_client = operations_v1.AbstractOperationsClient( + transport=rest_transport + ) + + # Return the client from cache. 
+ return self._operations_client + + class _AddSplitPoints( + _BaseDatabaseAdminRestTransport._BaseAddSplitPoints, DatabaseAdminRestStub + ): + def __hash__(self): + return hash("DatabaseAdminRestTransport.AddSplitPoints") + + @staticmethod + def _get_response( + host, + metadata, + query_params, + session, + timeout, + transcoded_request, + body=None, + ): + uri = transcoded_request["uri"] + method = transcoded_request["method"] + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(session, method)( + "{host}{uri}".format(host=host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + return response + + def __call__( + self, + request: spanner_database_admin.AddSplitPointsRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), + ) -> spanner_database_admin.AddSplitPointsResponse: + r"""Call the add split points method over HTTP. + + Args: + request (~.spanner_database_admin.AddSplitPointsRequest): + The request object. The request for + [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. + + Returns: + ~.spanner_database_admin.AddSplitPointsResponse: + The response for + [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. 
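
The str-versus-bytes rule in the metadata description above can be made concrete. A sketch, assuming a client and request constructed elsewhere; both key names are invented for illustration, and the snake_case method name is inferred from the RPC name:

    metadata = [
        ("x-example-string", "plain string value"),          # ordinary keys take str
        ("x-example-trace-bin", b"\x0a\x02\x08\x01"),        # keys ending in -bin take bytes
    ]
    response = client.add_split_points(request=request, metadata=metadata)
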
+ + """ + + http_options = ( + _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_http_options() + ) + + request, metadata = self._interceptor.pre_add_split_points( + request, metadata + ) + transcoded_request = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_transcoded_request( + http_options, request + ) + + body = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_request_body_json( + transcoded_request + ) + + # Jsonify the query params + query_params = _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_query_params_json( + transcoded_request + ) + + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.AddSplitPoints", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "AddSplitPoints", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v1", + # Send the request + response = DatabaseAdminRestTransport._AddSplitPoints._get_response( + self._host, + metadata, + query_params, + self._session, + timeout, + transcoded_request, + body, ) - self._operations_client = operations_v1.AbstractOperationsClient( - transport=rest_transport - ) + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) - # Return the client from cache. 
- return self._operations_client + # Return the response + resp = spanner_database_admin.AddSplitPointsResponse() + pb_resp = spanner_database_admin.AddSplitPointsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + + resp = self._interceptor.post_add_split_points(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_add_split_points_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_database_admin.AddSplitPointsResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.add_split_points", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "AddSplitPoints", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) + return resp class _CopyBackup( _BaseDatabaseAdminRestTransport._BaseCopyBackup, DatabaseAdminRestStub @@ -1126,7 +1944,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the copy backup method over HTTP. @@ -1137,8 +1955,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
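
Each __call__ in this file converts a failed HTTP response the same way: a status of 400 or above is mapped through core_exceptions.from_http_response, so callers see a google.api_core exception subclass. A sketch, with client and request assumed from context:

    from google.api_core import exceptions as core_exceptions

    try:
        operation = client.copy_backup(request=request)
    except core_exceptions.GoogleAPICallError as exc:
        # from_http_response returns a GoogleAPICallError subclass whose
        # code reflects the HTTP status of the failed response.
        print(exc.code, exc.message)
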
Returns: ~.operations_pb2.Operation: @@ -1151,6 +1971,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_http_options() ) + request, metadata = self._interceptor.pre_copy_backup(request, metadata) transcoded_request = ( _BaseDatabaseAdminRestTransport._BaseCopyBackup._get_transcoded_request( @@ -1171,6 +1992,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CopyBackup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CopyBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._CopyBackup._get_response( self._host, @@ -1190,7 +2038,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_copy_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_copy_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.copy_backup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CopyBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateBackup( @@ -1228,7 +2102,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create backup method over HTTP. @@ -1239,8 +2113,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1253,6 +2129,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_http_options() ) + request, metadata = self._interceptor.pre_create_backup(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateBackup._get_transcoded_request( http_options, request @@ -1267,6 +2144,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CreateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._CreateBackup._get_response( self._host, @@ -1286,7 +2190,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CreateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateBackupSchedule( @@ -1324,7 +2254,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup_schedule.BackupSchedule: r"""Call the create backup schedule method over HTTP. @@ -1335,8 +2265,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.gsad_backup_schedule.BackupSchedule: @@ -1349,6 +2281,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseCreateBackupSchedule._get_http_options() ) + request, metadata = self._interceptor.pre_create_backup_schedule( request, metadata ) @@ -1365,6 +2298,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateBackupSchedule", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CreateBackupSchedule", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._CreateBackupSchedule._get_response( self._host, @@ -1386,7 +2346,35 @@ def __call__( pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_backup_schedule(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_backup_schedule_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gsad_backup_schedule.BackupSchedule.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_backup_schedule", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CreateBackupSchedule", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateDatabase( @@ -1424,7 +2412,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create database method over HTTP. @@ -1435,8 +2423,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1449,6 +2439,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_http_options() ) + request, metadata = self._interceptor.pre_create_database(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseCreateDatabase._get_transcoded_request( http_options, request @@ -1463,6 +2454,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CreateDatabase", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CreateDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._CreateDatabase._get_response( self._host, @@ -1482,7 +2500,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.create_database", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CreateDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteBackup( @@ -1519,7 +2563,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete backup method over HTTP. @@ -1530,13 +2574,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
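
The _LOGGER.debug calls repeated in each method attach their details through the `extra` mapping, so the fields land on the LogRecord as attributes. A sketch of a formatter that surfaces them; the attribute names mirror the extras used above:

    import logging


    class RpcAwareFormatter(logging.Formatter):
        def format(self, record: logging.LogRecord) -> str:
            # serviceName and rpcName are set by the extra= dicts in this
            # file; fall back to "-" for records that lack them.
            service = getattr(record, "serviceName", "-")
            rpc = getattr(record, "rpcName", "-")
            return f"{service}/{rpc}: {record.getMessage()}"
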
""" http_options = ( _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_http_options() ) + request, metadata = self._interceptor.pre_delete_backup(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseDeleteBackup._get_transcoded_request( http_options, request @@ -1547,6 +2594,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "DeleteBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._DeleteBackup._get_response( self._host, @@ -1596,7 +2670,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete backup schedule method over HTTP. @@ -1607,13 +2681,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseDatabaseAdminRestTransport._BaseDeleteBackupSchedule._get_http_options() ) + request, metadata = self._interceptor.pre_delete_backup_schedule( request, metadata ) @@ -1626,6 +2703,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteBackupSchedule", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "DeleteBackupSchedule", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._DeleteBackupSchedule._get_response( self._host, @@ -1675,7 +2779,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the drop database method over HTTP. 
@@ -1686,13 +2790,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_http_options() ) + request, metadata = self._interceptor.pre_drop_database(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseDropDatabase._get_transcoded_request( http_options, request @@ -1703,6 +2810,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DropDatabase", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "DropDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._DropDatabase._get_response( self._host, @@ -1752,7 +2886,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup.Backup: r"""Call the get backup method over HTTP. @@ -1763,8 +2897,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.backup.Backup: @@ -1774,6 +2910,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseGetBackup._get_http_options() ) + request, metadata = self._interceptor.pre_get_backup(request, metadata) transcoded_request = ( _BaseDatabaseAdminRestTransport._BaseGetBackup._get_transcoded_request( @@ -1788,6 +2925,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._GetBackup._get_response( self._host, @@ -1808,7 +2972,33 @@ def __call__( pb_resp = backup.Backup.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = backup.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetBackupSchedule( @@ -1845,7 +3035,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup_schedule.BackupSchedule: r"""Call the get backup schedule method over HTTP. @@ -1856,8 +3046,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
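
The response-decoding idiom repeated above (construct the proto-plus wrapper, then json_format.Parse into Message.pb(resp)) works because .pb() returns the wrapped protobuf message by reference, so parsing into it also populates the wrapper. A self-contained sketch with an invented payload:

    from google.protobuf import json_format

    from google.cloud.spanner_admin_database_v1.types import Backup

    resp = Backup()
    json_format.Parse(
        '{"name": "projects/p/instances/i/backups/b"}',  # illustrative payload
        Backup.pb(resp),
        ignore_unknown_fields=True,
    )
    assert resp.name == "projects/p/instances/i/backups/b"
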
Returns: ~.backup_schedule.BackupSchedule: @@ -1870,6 +3062,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseGetBackupSchedule._get_http_options() ) + request, metadata = self._interceptor.pre_get_backup_schedule( request, metadata ) @@ -1882,6 +3075,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetBackupSchedule", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetBackupSchedule", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._GetBackupSchedule._get_response( self._host, @@ -1902,7 +3122,33 @@ def __call__( pb_resp = backup_schedule.BackupSchedule.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_backup_schedule(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_backup_schedule_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = backup_schedule.BackupSchedule.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_backup_schedule", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetBackupSchedule", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetDatabase( @@ -1939,7 +3185,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.Database: r"""Call the get database method over HTTP. @@ -1950,8 +3196,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_database_admin.Database: @@ -1961,6 +3209,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_http_options() ) + request, metadata = self._interceptor.pre_get_database(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetDatabase._get_transcoded_request( http_options, request @@ -1973,6 +3222,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabase", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._GetDatabase._get_response( self._host, @@ -1993,7 +3269,33 @@ def __call__( pb_resp = spanner_database_admin.Database.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner_database_admin.Database.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetDatabaseDdl( @@ -2030,7 +3332,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.GetDatabaseDdlResponse: r"""Call the get database ddl method over HTTP. @@ -2041,8 +3343,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_database_admin.GetDatabaseDdlResponse: @@ -2054,6 +3358,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseGetDatabaseDdl._get_http_options() ) + request, metadata = self._interceptor.pre_get_database_ddl( request, metadata ) @@ -2066,6 +3371,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetDatabaseDdl", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetDatabaseDdl", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._GetDatabaseDdl._get_response( self._host, @@ -2086,7 +3418,35 @@ def __call__( pb_resp = spanner_database_admin.GetDatabaseDdlResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_database_ddl(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_database_ddl_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_database_admin.GetDatabaseDdlResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_database_ddl", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetDatabaseDdl", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetIamPolicy( @@ -2124,7 +3484,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -2134,8 +3494,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -2220,6 +3582,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( http_options, request @@ -2234,6 +3597,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._GetIamPolicy._get_response( self._host, @@ -2255,7 +3645,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.get_iam_policy", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListBackupOperations( @@ -2292,7 +3708,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup.ListBackupOperationsResponse: r"""Call the list backup operations method over HTTP. @@ -2303,8 +3719,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
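Note the serialization split in these logging blocks: proto-plus request types go through `type(request).to_json(request)`, while the IAM methods use `json_format.MessageToJson(request)` because `policy_pb2`/`iam_policy_pb2` are raw protobuf messages, not proto-plus wrappers. A small illustration of the two paths:

from google.iam.v1 import policy_pb2
from google.protobuf import json_format

from google.cloud.spanner_admin_database_v1.types import backup as backup_types

# Raw protobuf message: serialize with json_format.
policy = policy_pb2.Policy(etag=b"abc")
print(json_format.MessageToJson(policy))

# proto-plus wrapper: serialize with the generated class's own to_json.
b = backup_types.Backup(name="projects/p/instances/i/backups/b")
print(type(b).to_json(b))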
Returns: ~.backup.ListBackupOperationsResponse: @@ -2316,6 +3734,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseListBackupOperations._get_http_options() ) + request, metadata = self._interceptor.pre_list_backup_operations( request, metadata ) @@ -2328,6 +3747,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupOperations", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListBackupOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._ListBackupOperations._get_response( self._host, @@ -2348,7 +3794,35 @@ def __call__( pb_resp = backup.ListBackupOperationsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_backup_operations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_backup_operations_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = backup.ListBackupOperationsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_operations", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListBackupOperations", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListBackups( @@ -2385,7 +3859,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup.ListBackupsResponse: r"""Call the list backups method over HTTP. @@ -2396,8 +3870,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.backup.ListBackupsResponse: @@ -2409,6 +3885,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseListBackups._get_http_options() ) + request, metadata = self._interceptor.pre_list_backups(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseListBackups._get_transcoded_request( http_options, request @@ -2421,6 +3898,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackups", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListBackups", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._ListBackups._get_response( self._host, @@ -2441,7 +3945,33 @@ def __call__( pb_resp = backup.ListBackupsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_backups(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_backups_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = backup.ListBackupsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backups", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListBackups", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListBackupSchedules( @@ -2478,7 +4008,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> backup_schedule.ListBackupSchedulesResponse: r"""Call the list backup schedules method over HTTP. @@ -2489,8 +4019,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.backup_schedule.ListBackupSchedulesResponse: @@ -2502,6 +4034,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseListBackupSchedules._get_http_options() ) + request, metadata = self._interceptor.pre_list_backup_schedules( request, metadata ) @@ -2514,6 +4047,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListBackupSchedules", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListBackupSchedules", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._ListBackupSchedules._get_response( self._host, @@ -2534,7 +4094,35 @@ def __call__( pb_resp = backup_schedule.ListBackupSchedulesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_backup_schedules(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_backup_schedules_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + backup_schedule.ListBackupSchedulesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_backup_schedules", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListBackupSchedules", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListDatabaseOperations( @@ -2572,7 +4160,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.ListDatabaseOperationsResponse: r"""Call the list database operations method over HTTP. @@ -2583,8 +4171,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
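Each method now routes its parsed response through a `post_*_with_metadata` interceptor hook in addition to the older `post_*` hook, passing along metadata rebuilt from the HTTP response headers. A sketch of a custom interceptor using the new hook, wired in through the transport constructor for illustration:

from google.cloud import spanner_admin_database_v1
from google.cloud.spanner_admin_database_v1.services.database_admin.transports.rest import (
    DatabaseAdminRestInterceptor,
    DatabaseAdminRestTransport,
)

class HeaderEchoInterceptor(DatabaseAdminRestInterceptor):
    def post_list_backup_schedules_with_metadata(self, response, metadata):
        # `metadata` is the [(header, value), ...] list the transport built
        # from response.headers; return both to continue the chain.
        print("response headers:", dict(metadata))
        return response, metadata

transport = DatabaseAdminRestTransport(interceptor=HeaderEchoInterceptor())
client = spanner_admin_database_v1.DatabaseAdminClient(transport=transport)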
Returns: ~.spanner_database_admin.ListDatabaseOperationsResponse: @@ -2596,6 +4186,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseListDatabaseOperations._get_http_options() ) + request, metadata = self._interceptor.pre_list_database_operations( request, metadata ) @@ -2608,6 +4199,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseOperations", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListDatabaseOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._ListDatabaseOperations._get_response( self._host, @@ -2628,7 +4246,37 @@ def __call__( pb_resp = spanner_database_admin.ListDatabaseOperationsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_database_operations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_database_operations_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_database_admin.ListDatabaseOperationsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_operations", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListDatabaseOperations", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListDatabaseRoles( @@ -2665,7 +4313,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.ListDatabaseRolesResponse: r"""Call the list database roles method over HTTP. @@ -2676,8 +4324,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_database_admin.ListDatabaseRolesResponse: @@ -2689,6 +4339,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseListDatabaseRoles._get_http_options() ) + request, metadata = self._interceptor.pre_list_database_roles( request, metadata ) @@ -2701,6 +4352,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabaseRoles", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListDatabaseRoles", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._ListDatabaseRoles._get_response( self._host, @@ -2721,7 +4399,37 @@ def __call__( pb_resp = spanner_database_admin.ListDatabaseRolesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_database_roles(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_database_roles_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_database_admin.ListDatabaseRolesResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_database_roles", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListDatabaseRoles", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListDatabases( @@ -2758,7 +4466,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_database_admin.ListDatabasesResponse: r"""Call the list databases method over HTTP. @@ -2769,8 +4477,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_database_admin.ListDatabasesResponse: @@ -2782,6 +4492,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseListDatabases._get_http_options() ) + request, metadata = self._interceptor.pre_list_databases(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseListDatabases._get_transcoded_request( http_options, request @@ -2792,6 +4503,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListDatabases", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListDatabases", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._ListDatabases._get_response( self._host, @@ -2812,7 +4550,35 @@ def __call__( pb_resp = spanner_database_admin.ListDatabasesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_databases(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_databases_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_database_admin.ListDatabasesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.list_databases", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListDatabases", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _RestoreDatabase( @@ -2850,7 +4616,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the restore database method over HTTP. @@ -2861,8 +4627,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2875,6 +4643,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseRestoreDatabase._get_http_options() ) + request, metadata = self._interceptor.pre_restore_database( request, metadata ) @@ -2891,6 +4660,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.RestoreDatabase", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "RestoreDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._RestoreDatabase._get_response( self._host, @@ -2910,7 +4706,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_restore_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_restore_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.restore_database", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "RestoreDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SetIamPolicy( @@ -2948,7 +4770,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -2958,8 +4780,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
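At the transport layer `RestoreDatabase` returns a bare `operations_pb2.Operation`; the public client wraps it in an `api_core` future so callers can block on completion. A usage sketch, with hypothetical resource names:

from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
operation = client.restore_database(
    request=spanner_admin_database_v1.RestoreDatabaseRequest(
        parent="projects/my-project/instances/my-instance",
        database_id="restored-db",
        backup="projects/my-project/instances/my-instance/backups/my-backup",
    )
)
database = operation.result(timeout=3600)  # polls until the restore finishes
print(database.name)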
Returns: ~.policy_pb2.Policy: @@ -3044,6 +4868,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( http_options, request @@ -3058,6 +4883,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._SetIamPolicy._get_response( self._host, @@ -3079,7 +4931,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_set_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.set_iam_policy", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _TestIamPermissions( @@ -3117,7 +4995,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Call the test iam permissions method over HTTP. @@ -3127,8 +5005,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: @@ -3138,6 +5018,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseTestIamPermissions._get_http_options() ) + request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) @@ -3154,6 +5035,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._TestIamPermissions._get_response( self._host, @@ -3175,7 +5083,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateBackup( @@ -3213,7 +5147,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup.Backup: r"""Call the update backup method over HTTP. @@ -3224,8 +5158,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
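As with GetIamPolicy above, TestIamPermissions speaks raw `iam_policy_pb2` messages. A quick sketch of the call shape; the permission string is illustrative and should be checked against the Spanner IAM permission list:

from google.cloud import spanner_admin_database_v1
from google.iam.v1 import iam_policy_pb2

client = spanner_admin_database_v1.DatabaseAdminClient()
response = client.test_iam_permissions(
    request=iam_policy_pb2.TestIamPermissionsRequest(
        resource="projects/my-project/instances/my-instance/databases/my-db",
        permissions=["spanner.databases.get"],
    )
)
print(list(response.permissions))  # the subset the caller actually holds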
Returns: ~.gsad_backup.Backup: @@ -3235,6 +5171,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_http_options() ) + request, metadata = self._interceptor.pre_update_backup(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateBackup._get_transcoded_request( http_options, request @@ -3249,6 +5186,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._UpdateBackup._get_response( self._host, @@ -3270,7 +5234,33 @@ def __call__( pb_resp = gsad_backup.Backup.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_backup(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_backup_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gsad_backup.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateBackupSchedule( @@ -3308,7 +5298,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gsad_backup_schedule.BackupSchedule: r"""Call the update backup schedule method over HTTP. @@ -3319,8 +5309,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.gsad_backup_schedule.BackupSchedule: @@ -3333,6 +5325,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseUpdateBackupSchedule._get_http_options() ) + request, metadata = self._interceptor.pre_update_backup_schedule( request, metadata ) @@ -3349,6 +5342,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateBackupSchedule", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateBackupSchedule", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._UpdateBackupSchedule._get_response( self._host, @@ -3370,7 +5390,35 @@ def __call__( pb_resp = gsad_backup_schedule.BackupSchedule.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_backup_schedule(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_backup_schedule_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gsad_backup_schedule.BackupSchedule.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_backup_schedule", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateBackupSchedule", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateDatabase( @@ -3408,7 +5456,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update database method over HTTP. @@ -3419,8 +5467,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
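For the update RPCs the request carries both the resource and a `FieldMask` naming the fields to change. A sketch for `UpdateBackupSchedule`, assuming the schedule already exists; the `retention_duration` field name follows the `BackupSchedule` message:

from google.protobuf import duration_pb2, field_mask_pb2
from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
request = spanner_admin_database_v1.UpdateBackupScheduleRequest(
    backup_schedule=spanner_admin_database_v1.BackupSchedule(
        name="projects/my-project/instances/my-instance/databases/my-db/backupSchedules/my-schedule",
        retention_duration=duration_pb2.Duration(seconds=7 * 24 * 3600),
    ),
    # Only the fields named in the mask are touched on the server.
    update_mask=field_mask_pb2.FieldMask(paths=["retention_duration"]),
)
schedule = client.update_backup_schedule(request=request)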
Returns: ~.operations_pb2.Operation: @@ -3433,6 +5483,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_http_options() ) + request, metadata = self._interceptor.pre_update_database(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseUpdateDatabase._get_transcoded_request( http_options, request @@ -3447,6 +5498,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabase", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateDatabase", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._UpdateDatabase._get_response( self._host, @@ -3466,7 +5544,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_database(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_database_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateDatabase", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateDatabaseDdl( @@ -3504,7 +5608,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update database ddl method over HTTP. @@ -3532,8 +5636,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3546,6 +5652,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseUpdateDatabaseDdl._get_http_options() ) + request, metadata = self._interceptor.pre_update_database_ddl( request, metadata ) @@ -3562,6 +5669,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.UpdateDatabaseDdl", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateDatabaseDdl", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._UpdateDatabaseDdl._get_response( self._host, @@ -3581,9 +5715,46 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_database_ddl(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_database_ddl_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminClient.update_database_ddl", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "UpdateDatabaseDdl", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp + @property + def add_split_points( + self, + ) -> Callable[ + [spanner_database_admin.AddSplitPointsRequest], + spanner_database_admin.AddSplitPointsResponse, + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._AddSplitPoints(self._session, self._host, self._interceptor) # type: ignore + @property def copy_backup( self, @@ -3856,7 +6027,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Call the cancel operation method over HTTP. @@ -3866,13 +6037,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseDatabaseAdminRestTransport._BaseCancelOperation._get_http_options() ) + request, metadata = self._interceptor.pre_cancel_operation( request, metadata ) @@ -3885,6 +6059,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.CancelOperation", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "CancelOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._CancelOperation._get_response( self._host, @@ -3940,7 +6141,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Call the delete operation method over HTTP. @@ -3950,13 +6151,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseDatabaseAdminRestTransport._BaseDeleteOperation._get_http_options() ) + request, metadata = self._interceptor.pre_delete_operation( request, metadata ) @@ -3969,6 +6173,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.DeleteOperation", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "DeleteOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._DeleteOperation._get_response( self._host, @@ -4024,7 +6255,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the get operation method over HTTP. @@ -4034,8 +6265,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: operations_pb2.Operation: Response from GetOperation method. @@ -4044,6 +6277,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseGetOperation._get_http_options() ) + request, metadata = self._interceptor.pre_get_operation(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseGetOperation._get_transcoded_request( http_options, request @@ -4054,6 +6288,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.GetOperation", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetOperation", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._GetOperation._get_response( self._host, @@ -4073,6 +6334,27 @@ def __call__( resp = operations_pb2.Operation() resp = json_format.Parse(content, resp) resp = self._interceptor.post_get_operation(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.GetOperation", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "GetOperation", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) return resp @property @@ -4113,7 +6395,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.ListOperationsResponse: r"""Call the list operations method over HTTP. @@ -4123,8 +6405,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: operations_pb2.ListOperationsResponse: Response from ListOperations method. 
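The operations mixins (`GetOperation`, `ListOperations`, ...) gained the same logging treatment. At the client surface they are called directly with `google.longrunning` request messages; a sketch, with a hypothetical database path:

from google.longrunning import operations_pb2
from google.cloud import spanner_admin_database_v1

client = spanner_admin_database_v1.DatabaseAdminClient()
response = client.list_operations(
    request=operations_pb2.ListOperationsRequest(
        name="projects/my-project/instances/my-instance/databases/my-db/operations",
    )
)
for op in response.operations:
    print(op.name, op.done)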
@@ -4133,6 +6417,7 @@ def __call__( http_options = ( _BaseDatabaseAdminRestTransport._BaseListOperations._get_http_options() ) + request, metadata = self._interceptor.pre_list_operations(request, metadata) transcoded_request = _BaseDatabaseAdminRestTransport._BaseListOperations._get_transcoded_request( http_options, request @@ -4143,6 +6428,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.database_v1.DatabaseAdminClient.ListOperations", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = DatabaseAdminRestTransport._ListOperations._get_response( self._host, @@ -4162,6 +6474,27 @@ def __call__( resp = operations_pb2.ListOperationsResponse() resp = json_format.Parse(content, resp) resp = self._interceptor.post_list_operations(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.database_v1.DatabaseAdminAsyncClient.ListOperations", + extra={ + "serviceName": "google.spanner.admin.database.v1.DatabaseAdmin", + "rpcName": "ListOperations", + "httpResponse": http_response, + "metadata": http_response["headers"], + }, + ) return resp @property diff --git a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py index 677f050cae..b55ca50b62 100644 --- a/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py +++ b/google/cloud/spanner_admin_database_v1/services/database_admin/transports/rest_base.py @@ -99,6 +99,63 @@ def __init__( api_audience=api_audience, ) + class _BaseAddSplitPoints: + def __hash__(self): # pragma: NO COVER + return NotImplementedError("__hash__ must be implemented.") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + @staticmethod + def _get_http_options(): + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints", + "body": "*", + }, + ] + return http_options + + @staticmethod + def _get_transcoded_request(http_options, request): + pb_request = spanner_database_admin.AddSplitPointsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + return transcoded_request + + @staticmethod + def _get_request_body_json(transcoded_request): + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], 
use_integers_for_enums=True + ) + return body + + @staticmethod + def _get_query_params_json(transcoded_request): + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + use_integers_for_enums=True, + ) + ) + query_params.update( + _BaseDatabaseAdminRestTransport._BaseAddSplitPoints._get_unset_required_fields( + query_params + ) + ) + + query_params["$alt"] = "json;enum-encoding=int" + return query_params + class _BaseCopyBackup: def __hash__(self): # pragma: NO COVER return NotImplementedError("__hash__ must be implemented.") diff --git a/google/cloud/spanner_admin_database_v1/types/__init__.py b/google/cloud/spanner_admin_database_v1/types/__init__.py index 9a9515e9b2..70db52cd35 100644 --- a/google/cloud/spanner_admin_database_v1/types/__init__.py +++ b/google/cloud/spanner_admin_database_v1/types/__init__.py @@ -16,6 +16,7 @@ from .backup import ( Backup, BackupInfo, + BackupInstancePartition, CopyBackupEncryptionConfig, CopyBackupMetadata, CopyBackupRequest, @@ -50,6 +51,8 @@ DatabaseDialect, ) from .spanner_database_admin import ( + AddSplitPointsRequest, + AddSplitPointsResponse, CreateDatabaseMetadata, CreateDatabaseRequest, Database, @@ -70,6 +73,7 @@ RestoreDatabaseMetadata, RestoreDatabaseRequest, RestoreInfo, + SplitPoints, UpdateDatabaseDdlMetadata, UpdateDatabaseDdlRequest, UpdateDatabaseMetadata, @@ -80,6 +84,7 @@ __all__ = ( "Backup", "BackupInfo", + "BackupInstancePartition", "CopyBackupEncryptionConfig", "CopyBackupMetadata", "CopyBackupRequest", @@ -108,6 +113,8 @@ "EncryptionInfo", "OperationProgress", "DatabaseDialect", + "AddSplitPointsRequest", + "AddSplitPointsResponse", "CreateDatabaseMetadata", "CreateDatabaseRequest", "Database", @@ -128,6 +135,7 @@ "RestoreDatabaseMetadata", "RestoreDatabaseRequest", "RestoreInfo", + "SplitPoints", "UpdateDatabaseDdlMetadata", "UpdateDatabaseDdlRequest", "UpdateDatabaseMetadata", diff --git a/google/cloud/spanner_admin_database_v1/types/backup.py b/google/cloud/spanner_admin_database_v1/types/backup.py index 0c220c3953..acec22244f 100644 --- a/google/cloud/spanner_admin_database_v1/types/backup.py +++ b/google/cloud/spanner_admin_database_v1/types/backup.py @@ -45,6 +45,7 @@ "CopyBackupEncryptionConfig", "FullBackupSpec", "IncrementalBackupSpec", + "BackupInstancePartition", }, ) @@ -199,6 +200,12 @@ class Backup(proto.Message): this is the version time of the backup. This field can be used to understand what data is being retained by the backup system. + instance_partitions (MutableSequence[google.cloud.spanner_admin_database_v1.types.BackupInstancePartition]): + Output only. The instance partition(s) storing the backup. + + This is the same as the list of the instance partition(s) + that the database had footprint in at the backup's + ``version_time``. """ class State(proto.Enum): @@ -300,6 +307,13 @@ class State(proto.Enum): number=18, message=timestamp_pb2.Timestamp, ) + instance_partitions: MutableSequence[ + "BackupInstancePartition" + ] = proto.RepeatedField( + proto.MESSAGE, + number=19, + message="BackupInstancePartition", + ) class CreateBackupRequest(proto.Message): @@ -1073,4 +1087,20 @@ class IncrementalBackupSpec(proto.Message): """ +class BackupInstancePartition(proto.Message): + r"""Instance partition information for the backup. + + Attributes: + instance_partition (str): + A unique identifier for the instance partition. 
Values are + of the form + ``projects//instances//instancePartitions/`` + """ + + instance_partition: str = proto.Field( + proto.STRING, + number=1, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_admin_database_v1/types/backup_schedule.py b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py index ad9a7ddaf2..9637480731 100644 --- a/google/cloud/spanner_admin_database_v1/types/backup_schedule.py +++ b/google/cloud/spanner_admin_database_v1/types/backup_schedule.py @@ -160,7 +160,7 @@ class CrontabSpec(proto.Message): Required. Textual representation of the crontab. User can customize the backup frequency and the backup version time using the cron expression. The version time must be in UTC - timzeone. + timezone. The backup will contain an externally consistent copy of the database at the version time. Allowed frequencies are 12 diff --git a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py index 0f45d87920..3a9c0d8edd 100644 --- a/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py +++ b/google/cloud/spanner_admin_database_v1/types/spanner_database_admin.py @@ -23,6 +23,7 @@ from google.cloud.spanner_admin_database_v1.types import common from google.longrunning import operations_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -54,6 +55,9 @@ "DatabaseRole", "ListDatabaseRolesRequest", "ListDatabaseRolesResponse", + "AddSplitPointsRequest", + "AddSplitPointsResponse", + "SplitPoints", }, ) @@ -1192,4 +1196,100 @@ def raw_page(self): ) +class AddSplitPointsRequest(proto.Message): + r"""The request for + [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. + + Attributes: + database (str): + Required. The database on whose tables/indexes split points + are to be added. Values are of the form + ``projects//instances//databases/``. + split_points (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]): + Required. The split points to add. + initiator (str): + Optional. A user-supplied tag associated with the split + points. For example, "initial_data_load", "special_event_1". + Defaults to "CloudAddSplitPointsAPI" if not specified. The + length of the tag must not exceed 50 characters, else it will + be trimmed. Only valid UTF-8 characters are allowed. + """ + + database: str = proto.Field( + proto.STRING, + number=1, + ) + split_points: MutableSequence["SplitPoints"] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message="SplitPoints", + ) + initiator: str = proto.Field( + proto.STRING, + number=3, + ) + + +class AddSplitPointsResponse(proto.Message): + r"""The response for + [AddSplitPoints][google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints]. + + """ + + +class SplitPoints(proto.Message): + r"""The split points of a table/index. + + Attributes: + table (str): + The table to split. + index (str): + The index to split. If specified, the ``table`` field must + refer to the index's base table. + keys (MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints.Key]): + Required. The list of split keys, i.e., the + split boundaries. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Optional. The expiration timestamp of the + split points. A timestamp in the past means + immediate expiration.
The maximum value can be + 30 days in the future. Defaults to 10 days in + the future if not specified. + """ + + class Key(proto.Message): + r"""A split key. + + Attributes: + key_parts (google.protobuf.struct_pb2.ListValue): + Required. The column values making up the + split key. + """ + + key_parts: struct_pb2.ListValue = proto.Field( + proto.MESSAGE, + number=1, + message=struct_pb2.ListValue, + ) + + table: str = proto.Field( + proto.STRING, + number=1, + ) + index: str = proto.Field( + proto.STRING, + number=2, + ) + keys: MutableSequence[Key] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=Key, + ) + expire_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/spanner_admin_instance_v1/__init__.py b/google/cloud/spanner_admin_instance_v1/__init__.py index 5d8acc4165..f5b8d7277f 100644 --- a/google/cloud/spanner_admin_instance_v1/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/__init__.py @@ -34,6 +34,7 @@ from .types.spanner_instance_admin import DeleteInstanceConfigRequest from .types.spanner_instance_admin import DeleteInstancePartitionRequest from .types.spanner_instance_admin import DeleteInstanceRequest +from .types.spanner_instance_admin import FreeInstanceMetadata from .types.spanner_instance_admin import GetInstanceConfigRequest from .types.spanner_instance_admin import GetInstancePartitionRequest from .types.spanner_instance_admin import GetInstanceRequest @@ -74,6 +75,7 @@ "DeleteInstanceConfigRequest", "DeleteInstancePartitionRequest", "DeleteInstanceRequest", + "FreeInstanceMetadata", "FulfillmentPeriod", "GetInstanceConfigRequest", "GetInstancePartitionRequest", diff --git a/google/cloud/spanner_admin_instance_v1/gapic_version.py b/google/cloud/spanner_admin_instance_v1/gapic_version.py index 5ea820ffea..9b205942db 100644 --- a/google/cloud/spanner_admin_instance_v1/gapic_version.py +++ b/google/cloud/spanner_admin_instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.52.0" # {x-release-please-version} +__version__ = "3.53.0" # {x-release-please-version} diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py index 045e5c377a..33e93d9b90 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
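Taken together, the new ``AddSplitPointsRequest``, ``SplitPoints``, and ``SplitPoints.Key`` messages above compose as follows. A hedged sketch with placeholder names, assuming the matching ``add_split_points`` client method generated alongside this transport:

.. code-block:: python

    from google.protobuf import struct_pb2

    from google.cloud import spanner_admin_database_v1
    from google.cloud.spanner_admin_database_v1 import types

    # A split key is a ListValue holding the boundary column values.
    key = types.SplitPoints.Key(
        key_parts=struct_pb2.ListValue(
            values=[struct_pb2.Value(string_value="customer-42")]
        )
    )
    request = types.AddSplitPointsRequest(
        database="projects/my-project/instances/my-instance/databases/my-database",
        split_points=[types.SplitPoints(table="Customers", keys=[key])],
        initiator="initial_data_load",  # optional tag; trimmed past 50 characters
    )

    client = spanner_admin_database_v1.DatabaseAdminClient()
    response = client.add_split_points(request=request)  # AddSplitPointsResponse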
# +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -56,6 +57,15 @@ from .transports.grpc_asyncio import InstanceAdminGrpcAsyncIOTransport from .client import InstanceAdminClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class InstanceAdminAsyncClient: """Cloud Spanner Instance Admin API @@ -292,6 +302,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.spanner.admin.instance_v1.InstanceAdminAsyncClient`.", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "credentialsType": None, + }, + ) + async def list_instance_configs( self, request: Optional[ @@ -301,10 +333,12 @@ async def list_instance_configs( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstanceConfigsAsyncPager: r"""Lists the supported instance configurations for a given project. + Returns both Google-managed configurations and + user-managed configurations. .. code-block:: python @@ -348,8 +382,10 @@ async def sample_list_instance_configs(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigsAsyncPager: @@ -426,7 +462,7 @@ async def get_instance_config( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.InstanceConfig: r"""Gets information about a particular instance configuration. @@ -472,8 +508,10 @@ async def sample_get_instance_config(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
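The ``CLIENT_LOGGING_SUPPORTED`` / ``_LOGGER`` machinery introduced here gates all of the new debug records on the standard ``logging`` level. A sketch of opting in, assuming a ``google-api-core`` version that ships ``client_logging``; the handler and filter wiring below are illustrative, while the ``rpcName``/``serviceName`` attributes come from the ``extra`` dicts in this diff:

.. code-block:: python

    import logging

    class RpcLogFilter(logging.Filter):
        def filter(self, record: logging.LogRecord) -> bool:
            # The "extra" dicts in this diff surface as record attributes.
            rpc = getattr(record, "rpcName", None)
            if rpc is not None:
                print(getattr(record, "serviceName", ""), rpc)
            return True

    # The records are emitted only when DEBUG is enabled for these loggers.
    logger = logging.getLogger("google.cloud.spanner_admin_instance_v1")
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.addFilter(RpcLogFilter())
    logger.addHandler(handler)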
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.types.InstanceConfig: @@ -540,11 +578,10 @@ async def create_instance_config( instance_config_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates an instance configuration and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance configuration. The instance configuration name is assigned by the caller. If the named instance configuration already exists, @@ -571,14 +608,12 @@ async def create_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track creation of the instance configuration. The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -620,7 +655,7 @@ async def sample_create_instance_config(): Args: request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.CreateInstanceConfigRequest, dict]]): The request object. The request for - [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest]. + [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. parent (:class:`str`): Required. The name of the project in which to create the instance configuration. Values are of the form @@ -630,10 +665,10 @@ async def sample_create_instance_config(): on the ``request`` instance; if ``request`` is provided, this should not be set. instance_config (:class:`google.cloud.spanner_admin_instance_v1.types.InstanceConfig`): - Required. The InstanceConfig proto of the configuration - to create. instance_config.name must be - ``/instanceConfigs/``. - instance_config.base_config must be a Google managed + Required. The ``InstanceConfig`` proto of the + configuration to create. ``instance_config.name`` must + be ``/instanceConfigs/``. + ``instance_config.base_config`` must be a Google-managed configuration name, e.g. /instanceConfigs/us-east1, /instanceConfigs/nam3. @@ -654,8 +689,10 @@ async def sample_create_instance_config(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -734,12 +771,12 @@ async def update_instance_config( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: - r"""Updates an instance configuration. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - configuration does not exist, returns ``NOT_FOUND``. + r"""Updates an instance configuration. The returned long-running + operation can be used to track the progress of updating the + instance. If the named instance configuration does not exist, + returns ``NOT_FOUND``. Only user-managed configurations can be updated. @@ -771,15 +808,12 @@ async def update_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track the instance configuration modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -819,7 +853,7 @@ async def sample_update_instance_config(): Args: request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.UpdateInstanceConfigRequest, dict]]): The request object. The request for - [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest]. + [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]. instance_config (:class:`google.cloud.spanner_admin_instance_v1.types.InstanceConfig`): Required. The user instance configuration to update, which must always include the instance configuration @@ -849,8 +883,10 @@ async def sample_update_instance_config(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -928,7 +964,7 @@ async def delete_instance_config( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes the instance configuration. Deletion is only allowed when no instances are using the configuration. 
If any instances @@ -966,7 +1002,7 @@ async def sample_delete_instance_config(): Args: request (Optional[Union[google.cloud.spanner_admin_instance_v1.types.DeleteInstanceConfigRequest, dict]]): The request object. The request for - [DeleteInstanceConfigRequest][InstanceAdmin.DeleteInstanceConfigRequest]. + [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig]. name (:class:`str`): Required. The name of the instance configuration to be deleted. Values are of the form @@ -978,8 +1014,10 @@ async def sample_delete_instance_config(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1033,14 +1071,13 @@ async def list_instance_config_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstanceConfigOperationsAsyncPager: - r"""Lists the user-managed instance configuration [long-running - operations][google.longrunning.Operation] in the given project. - An instance configuration operation has a name of the form + r"""Lists the user-managed instance configuration long-running + operations in the given project. An instance configuration + operation has a name of the form ``projects//instanceConfigs//operations/``. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -1090,8 +1127,10 @@ async def sample_list_instance_config_operations(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigOperationsAsyncPager: @@ -1172,7 +1211,7 @@ async def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstancesAsyncPager: r"""Lists all instances in the given project. @@ -1218,8 +1257,10 @@ async def sample_list_instances(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancesAsyncPager: @@ -1296,7 +1337,7 @@ async def list_instance_partitions( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstancePartitionsAsyncPager: r"""Lists all instance partitions for the given instance. @@ -1334,7 +1375,10 @@ async def sample_list_instance_partitions(): parent (:class:`str`): Required. The instance whose instance partitions should be listed. Values are of the form - ``projects//instances/``. + ``projects//instances/``. Use + ``{instance} = '-'`` to list instance partitions for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1342,8 +1386,10 @@ async def sample_list_instance_partitions(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionsAsyncPager: @@ -1422,7 +1468,7 @@ async def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.Instance: r"""Gets information about a particular instance. @@ -1466,8 +1512,10 @@ async def sample_get_instance(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
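The ``'-'`` wildcard documented above for ``list_instance_partitions`` works like this; a hedged sketch using the synchronous client and placeholder resource names:

.. code-block:: python

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()

    # '-' in place of an instance ID lists partitions across all instances.
    for partition in client.list_instance_partitions(
        parent="projects/my-project/instances/-"
    ):
        print(partition.name)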
Returns: google.cloud.spanner_admin_instance_v1.types.Instance: @@ -1533,12 +1581,11 @@ async def create_instance( instance: Optional[spanner_instance_admin.Instance] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates an instance and begins preparing it to begin serving. - The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance. The instance name is + The returned long-running operation can be used to track the + progress of preparing the new instance. The instance name is assigned by the caller. If the named instance already exists, ``CreateInstance`` returns ``ALREADY_EXISTS``. @@ -1564,14 +1611,13 @@ async def create_instance( API. - The instance's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be - used to track creation of the instance. The - [metadata][google.longrunning.Operation.metadata] field type is + used to track creation of the instance. The metadata field type + is [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. .. code-block:: python @@ -1641,8 +1687,10 @@ async def sample_create_instance(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1722,13 +1770,12 @@ async def update_instance( field_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates an instance, and begins allocating or releasing - resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - does not exist, returns ``NOT_FOUND``. + resources as requested. The returned long-running operation can + be used to track the progress of updating the instance. If the + named instance does not exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -1756,14 +1803,13 @@ async def update_instance( instance's tables. - The instance's new resource levels are readable via the API. 
- The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be - used to track the instance modification. The - [metadata][google.longrunning.Operation.metadata] field type is + used to track the instance modification. The metadata field type + is [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Authorization requires ``spanner.instances.update`` permission @@ -1834,8 +1880,10 @@ async def sample_update_instance(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1914,7 +1962,7 @@ async def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an instance. @@ -1966,8 +2014,10 @@ async def sample_delete_instance(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2019,7 +2069,7 @@ async def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -2069,8 +2119,10 @@ async def sample_set_iam_policy(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.iam.v1.policy_pb2.Policy: @@ -2156,7 +2208,7 @@ async def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy @@ -2207,8 +2259,10 @@ async def sample_get_iam_policy(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2295,7 +2349,7 @@ async def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -2357,8 +2411,10 @@ async def sample_test_iam_permissions(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -2418,7 +2474,7 @@ async def get_instance_partition( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.InstancePartition: r"""Gets information about a particular instance partition. @@ -2464,8 +2520,10 @@ async def sample_get_instance_partition(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_instance_v1.types.InstancePartition: @@ -2531,11 +2589,10 @@ async def create_instance_partition( instance_partition_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates an instance partition and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance partition. The instance partition name is assigned by the caller. If the named instance partition already exists, ``CreateInstancePartition`` @@ -2564,14 +2621,12 @@ async def create_instance_partition( readable via the API. - The instance partition's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track creation of the instance partition. The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -2646,8 +2701,10 @@ async def sample_create_instance_partition(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2726,7 +2783,7 @@ async def delete_instance_partition( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an existing instance partition. Requires that the instance partition is not used by any database or backup and is @@ -2774,8 +2831,10 @@ async def sample_delete_instance_partition(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have @@ -2832,13 +2891,13 @@ async def update_instance_partition( field_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates an instance partition, and begins allocating or - releasing resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance partition. If the named - instance partition does not exist, returns ``NOT_FOUND``. + releasing resources as requested. The returned long-running + operation can be used to track the progress of updating the + instance partition. If the named instance partition does not + exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -2868,15 +2927,12 @@ async def update_instance_partition( - The instance partition's new resource levels are readable via the API. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track the instance partition modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -2949,8 +3005,10 @@ async def sample_update_instance_partition(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -3029,14 +3087,12 @@ async def list_instance_partition_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstancePartitionOperationsAsyncPager: - r"""Lists instance partition [long-running - operations][google.longrunning.Operation] in the given instance. - An instance partition operation has a name of the form + r"""Lists instance partition long-running operations in the given + instance. An instance partition operation has a name of the form ``projects//instances//instancePartitions//operations/``. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. 
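A short sketch of inspecting that ``metadata.type_url``, with placeholder resource names (the pager and ``Operation`` fields are standard ``google.longrunning`` surface, not introduced by this diff):

.. code-block:: python

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()
    request = spanner_admin_instance_v1.ListInstancePartitionOperationsRequest(
        parent="projects/my-project/instances/my-instance",
    )
    for operation in client.list_instance_partition_operations(request=request):
        # type_url names the metadata message, e.g. a
        # CreateInstancePartitionMetadata while creation is pending.
        print(operation.name, operation.metadata.type_url)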
Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -3091,8 +3147,10 @@ async def sample_list_instance_partition_operations(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionOperationsAsyncPager: @@ -3172,12 +3230,11 @@ async def move_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Moves an instance to the target instance configuration. You can - use the returned [long-running - operation][google.longrunning.Operation] to track the progress - of moving the instance. + use the returned long-running operation to track the progress of + moving the instance. ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance meets any of the following criteria: @@ -3210,14 +3267,12 @@ async def move_instance( a higher transaction abort rate. However, moving an instance doesn't cause any downtime. - The returned [long-running - operation][google.longrunning.Operation] has a name of the - format ``/operations/`` and can be - used to track the move instance operation. The - [metadata][google.longrunning.Operation.metadata] field type is + The returned long-running operation has a name of the format + ``/operations/`` and can be used to + track the move instance operation. The metadata field type is [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation is not immediate because it involves moving any @@ -3279,8 +3334,10 @@ async def sample_move_instance(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py index 6d767f7383..11c880416b 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/client.py @@ -14,6 +14,9 @@ # limitations under the License. # from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( @@ -48,6 +51,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.spanner_admin_instance_v1.services.instance_admin import pagers @@ -525,52 +537,45 @@ def _get_universe_domain( raise ValueError("Universe Domain cannot be an empty string.") return universe_domain - @staticmethod - def _compare_universes( - client_universe: str, credentials: ga_credentials.Credentials - ) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. Returns: - bool: True iff client_universe matches the universe in credentials. + bool: True iff the configured universe domain is valid. Raises: - ValueError: when client_universe does not match the universe in credentials. + ValueError: If the configured universe domain is not valid. """ - default_universe = InstanceAdminClient._DEFAULT_UNIVERSE - credentials_universe = getattr(credentials, "universe_domain", default_universe) - - if client_universe != credentials_universe: - raise ValueError( - "The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default." - ) + # NOTE (b/349488459): universe validation is disabled until further notice. return True - def _validate_universe_domain(self): - """Validates client's and credentials' universe domains are consistent. - - Returns: - bool: True iff the configured universe domain is valid. + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. - Raises: - ValueError: If the configured universe domain is not valid. + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
""" - self._is_universe_domain_valid = ( - self._is_universe_domain_valid - or InstanceAdminClient._compare_universes( - self.universe_domain, self.transport._credentials - ) - ) - return self._is_universe_domain_valid + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) @property def api_endpoint(self): @@ -676,6 +681,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -741,6 +750,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.spanner.admin.instance_v1.InstanceAdminClient`.", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "credentialsType": None, + }, + ) + def list_instance_configs( self, request: Optional[ @@ -750,10 +782,12 @@ def list_instance_configs( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstanceConfigsPager: r"""Lists the supported instance configurations for a given project. + Returns both Google-managed configurations and + user-managed configurations. .. code-block:: python @@ -797,8 +831,10 @@ def sample_list_instance_configs(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigsPager: @@ -872,7 +908,7 @@ def get_instance_config( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.InstanceConfig: r"""Gets information about a particular instance configuration. @@ -918,8 +954,10 @@ def sample_get_instance_config(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.types.InstanceConfig: @@ -983,11 +1021,10 @@ def create_instance_config( instance_config_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates an instance configuration and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance configuration. The instance configuration name is assigned by the caller. If the named instance configuration already exists, @@ -1014,14 +1051,12 @@ def create_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track creation of the instance configuration. The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1063,7 +1098,7 @@ def sample_create_instance_config(): Args: request (Union[google.cloud.spanner_admin_instance_v1.types.CreateInstanceConfigRequest, dict]): The request object. The request for - [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest]. + [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. parent (str): Required. The name of the project in which to create the instance configuration. Values are of the form @@ -1073,10 +1108,10 @@ def sample_create_instance_config(): on the ``request`` instance; if ``request`` is provided, this should not be set. instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): - Required. The InstanceConfig proto of the configuration - to create. instance_config.name must be - ``/instanceConfigs/``. 
- instance_config.base_config must be a Google managed + Required. The ``InstanceConfig`` proto of the + configuration to create. ``instance_config.name`` must + be ``/instanceConfigs/``. + ``instance_config.base_config`` must be a Google-managed configuration name, e.g. /instanceConfigs/us-east1, /instanceConfigs/nam3. @@ -1097,8 +1132,10 @@ def sample_create_instance_config(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1174,12 +1211,12 @@ def update_instance_config( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: - r"""Updates an instance configuration. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - configuration does not exist, returns ``NOT_FOUND``. + r"""Updates an instance configuration. The returned long-running + operation can be used to track the progress of updating the + instance. If the named instance configuration does not exist, + returns ``NOT_FOUND``. Only user-managed configurations can be updated. @@ -1211,15 +1248,12 @@ def update_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track the instance configuration modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -1259,7 +1293,7 @@ def sample_update_instance_config(): Args: request (Union[google.cloud.spanner_admin_instance_v1.types.UpdateInstanceConfigRequest, dict]): The request object. The request for - [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest]. + [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]. instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): Required. The user instance configuration to update, which must always include the instance configuration @@ -1289,8 +1323,10 @@ def sample_update_instance_config(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1365,7 +1401,7 @@ def delete_instance_config( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes the instance configuration. Deletion is only allowed when no instances are using the configuration. If any instances @@ -1403,7 +1439,7 @@ def sample_delete_instance_config(): Args: request (Union[google.cloud.spanner_admin_instance_v1.types.DeleteInstanceConfigRequest, dict]): The request object. The request for - [DeleteInstanceConfigRequest][InstanceAdmin.DeleteInstanceConfigRequest]. + [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig]. name (str): Required. The name of the instance configuration to be deleted. Values are of the form @@ -1415,8 +1451,10 @@ def sample_delete_instance_config(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1467,14 +1505,13 @@ def list_instance_config_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstanceConfigOperationsPager: - r"""Lists the user-managed instance configuration [long-running - operations][google.longrunning.Operation] in the given project. - An instance configuration operation has a name of the form + r"""Lists the user-managed instance configuration long-running + operations in the given project. An instance configuration + operation has a name of the form ``projects//instanceConfigs//operations/``. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -1524,8 +1561,10 @@ def sample_list_instance_config_operations(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigOperationsPager: @@ -1605,7 +1644,7 @@ def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstancesPager: r"""Lists all instances in the given project. @@ -1651,8 +1690,10 @@ def sample_list_instances(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancesPager: @@ -1726,7 +1767,7 @@ def list_instance_partitions( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstancePartitionsPager: r"""Lists all instance partitions for the given instance. @@ -1764,7 +1805,10 @@ def sample_list_instance_partitions(): parent (str): Required. The instance whose instance partitions should be listed. Values are of the form - ``projects//instances/``. + ``projects//instances/``. Use + ``{instance} = '-'`` to list instance partitions for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this @@ -1772,8 +1816,10 @@ def sample_list_instance_partitions(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionsPager: @@ -1849,7 +1895,7 @@ def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.Instance: r"""Gets information about a particular instance. @@ -1893,8 +1939,10 @@ def sample_get_instance(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.types.Instance: @@ -1957,12 +2005,11 @@ def create_instance( instance: Optional[spanner_instance_admin.Instance] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates an instance and begins preparing it to begin serving. - The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance. The instance name is + The returned long-running operation can be used to track the + progress of preparing the new instance. The instance name is assigned by the caller. If the named instance already exists, ``CreateInstance`` returns ``ALREADY_EXISTS``. @@ -1988,14 +2035,13 @@ def create_instance( API. - The instance's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be - used to track creation of the instance. The - [metadata][google.longrunning.Operation.metadata] field type is + used to track creation of the instance. The metadata field type + is [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. .. code-block:: python @@ -2065,8 +2111,10 @@ def sample_create_instance(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2143,13 +2191,12 @@ def update_instance( field_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates an instance, and begins allocating or releasing - resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - does not exist, returns ``NOT_FOUND``. + resources as requested. The returned long-running operation can + be used to track the progress of updating the instance. If the + named instance does not exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -2177,14 +2224,13 @@ def update_instance( instance's tables. - The instance's new resource levels are readable via the API. 
- The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be - used to track the instance modification. The - [metadata][google.longrunning.Operation.metadata] field type is + used to track the instance modification. The metadata field type + is [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Authorization requires ``spanner.instances.update`` permission @@ -2255,8 +2301,10 @@ def sample_update_instance(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2332,7 +2380,7 @@ def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an instance. @@ -2384,8 +2432,10 @@ def sample_delete_instance(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2434,7 +2484,7 @@ def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -2484,8 +2534,10 @@ def sample_set_iam_policy(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
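Note: ``update_instance`` above, like the other mutating RPCs in this client, returns a ``google.api_core.operation.Operation``; callers typically block on ``result()``. A sketch using the flattened ``instance``/``field_mask`` arguments, with hypothetical resource names:

.. code-block:: python

    from google.cloud import spanner_admin_instance_v1
    from google.protobuf import field_mask_pb2

    client = spanner_admin_instance_v1.InstanceAdminClient()

    operation = client.update_instance(
        instance=spanner_admin_instance_v1.Instance(
            name="projects/my-project/instances/my-instance",
            node_count=3,
        ),
        field_mask=field_mask_pb2.FieldMask(paths=["node_count"]),
    )
    # Block until the long-running operation completes or times out.
    instance = operation.result(timeout=300)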
Returns: google.iam.v1.policy_pb2.Policy: @@ -2572,7 +2624,7 @@ def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists but does not have a policy @@ -2623,8 +2675,10 @@ def sample_get_iam_policy(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2712,7 +2766,7 @@ def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -2774,8 +2828,10 @@ def sample_test_iam_permissions(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -2836,7 +2892,7 @@ def get_instance_partition( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.InstancePartition: r"""Gets information about a particular instance partition. @@ -2882,8 +2938,10 @@ def sample_get_instance_partition(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
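Note: ``test_iam_permissions`` reports rather than enforces access: it returns the subset of the requested permissions that the caller actually holds. A sketch with hypothetical resource names:

.. code-block:: python

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()

    response = client.test_iam_permissions(
        resource="projects/my-project/instances/my-instance",
        permissions=["spanner.instances.update", "spanner.instances.delete"],
    )
    # Contains only the permissions the caller holds.
    print(list(response.permissions))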
Returns: google.cloud.spanner_admin_instance_v1.types.InstancePartition: @@ -2946,11 +3004,10 @@ def create_instance_partition( instance_partition_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates an instance partition and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance partition. The instance partition name is assigned by the caller. If the named instance partition already exists, ``CreateInstancePartition`` @@ -2979,14 +3036,12 @@ def create_instance_partition( readable via the API. - The instance partition's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track creation of the instance partition. The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -3061,8 +3116,10 @@ def sample_create_instance_partition(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3140,7 +3197,7 @@ def delete_instance_partition( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an existing instance partition. Requires that the instance partition is not used by any database or backup and is @@ -3188,8 +3245,10 @@ def sample_delete_instance_partition(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have @@ -3245,13 +3304,13 @@ def update_instance_partition( field_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates an instance partition, and begins allocating or - releasing resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance partition. If the named - instance partition does not exist, returns ``NOT_FOUND``. + releasing resources as requested. The returned long-running + operation can be used to track the progress of updating the + instance partition. If the named instance partition does not + exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -3281,15 +3340,12 @@ def update_instance_partition( - The instance partition's new resource levels are readable via the API. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track the instance partition modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -3362,8 +3418,10 @@ def sample_update_instance_partition(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3441,14 +3499,12 @@ def list_instance_partition_operations( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListInstancePartitionOperationsPager: - r"""Lists instance partition [long-running - operations][google.longrunning.Operation] in the given instance. - An instance partition operation has a name of the form + r"""Lists instance partition long-running operations in the given + instance. An instance partition operation has a name of the form ``projects//instances//instancePartitions//operations/``. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. 
Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -3503,8 +3559,10 @@ def sample_list_instance_partition_operations(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionOperationsPager: @@ -3583,12 +3641,11 @@ def move_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Moves an instance to the target instance configuration. You can - use the returned [long-running - operation][google.longrunning.Operation] to track the progress - of moving the instance. + use the returned long-running operation to track the progress of + moving the instance. ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance meets any of the following criteria: @@ -3621,14 +3678,12 @@ def move_instance( a higher transaction abort rate. However, moving an instance doesn't cause any downtime. - The returned [long-running - operation][google.longrunning.Operation] has a name of the - format ``/operations/`` and can be - used to track the move instance operation. The - [metadata][google.longrunning.Operation.metadata] field type is + The returned long-running operation has a name of the format + ``/operations/`` and can be used to + track the move instance operation. The metadata field type is [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation is not immediate because it involves moving any @@ -3690,8 +3745,10 @@ def sample_move_instance(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
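Note: ``move_instance`` exposes no flattened fields, so the request object is passed directly, and cancelling the returned operation only sets the metadata's ``cancel_time``, as described above. A sketch assuming the documented ``MoveInstanceRequest`` fields ``name`` and ``target_config``, with hypothetical resource names:

.. code-block:: python

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()

    operation = client.move_instance(
        request=spanner_admin_instance_v1.MoveInstanceRequest(
            name="projects/my-project/instances/my-instance",
            target_config="projects/my-project/instanceConfigs/nam3",
        )
    )
    # Moves can take a long time; poll generously.
    instance = operation.result(timeout=3600)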
Returns: google.api_core.operation.Operation: diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py index 89973615b0..7bbdee1e7a 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/pagers.py @@ -67,7 +67,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -81,8 +81,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstanceConfigsRequest(request) @@ -143,7 +145,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -157,8 +159,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstanceConfigsRequest(request) @@ -225,7 +229,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -239,8 +243,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstanceConfigOperationsRequest( @@ -305,7 +311,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. 
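Note: the async pagers receive the same ``metadata`` typing change. With the async client, the list call is awaited once and the resulting pager is consumed with ``async for``, fetching pages lazily; a sketch with a hypothetical project:

.. code-block:: python

    import asyncio

    from google.cloud import spanner_admin_instance_v1

    async def main():
        client = spanner_admin_instance_v1.InstanceAdminAsyncClient()
        pager = await client.list_instance_configs(parent="projects/my-project")
        async for config in pager:  # Each page is requested on demand.
            print(config.name)

    asyncio.run(main())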
@@ -319,8 +325,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstanceConfigOperationsRequest( @@ -387,7 +395,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -401,8 +409,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstancesRequest(request) @@ -461,7 +471,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -475,8 +485,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstancesRequest(request) @@ -541,7 +553,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -555,8 +567,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
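Note: the synchronous pagers behave the same way, hiding the page-token plumbing behind plain iteration. Combined with the ``instances/-`` wildcard documented earlier in this change, every partition in a project can be walked in one loop (hypothetical project name):

.. code-block:: python

    from google.cloud import spanner_admin_instance_v1

    client = spanner_admin_instance_v1.InstanceAdminClient()

    # "-" selects all instances; the pager issues follow-up page
    # requests transparently as the loop advances.
    pager = client.list_instance_partitions(
        parent="projects/my-project/instances/-"
    )
    for partition in pager:
        print(partition.name)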
""" self._method = method self._request = spanner_instance_admin.ListInstancePartitionsRequest(request) @@ -617,7 +631,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -631,8 +645,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstancePartitionsRequest(request) @@ -699,7 +715,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -713,8 +729,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstancePartitionOperationsRequest( @@ -780,7 +798,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -794,8 +812,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner_instance_admin.ListInstancePartitionOperationsRequest( diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py index f4c1e97f09..e31c5c48b7 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin from google.iam.v1 import iam_policy_pb2 # type: ignore @@ -32,6 +38,81 @@ from google.protobuf import empty_pb2 # type: ignore from .base import InstanceAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert the gRPC trailing metadata into a dict of str key/value pairs + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class InstanceAdminGrpcTransport(InstanceAdminTransport): """gRPC backend transport for InstanceAdmin. @@ -208,7 +289,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages.
This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -272,7 +358,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -288,6 +376,8 @@ def list_instance_configs( Lists the supported instance configurations for a given project. + Returns both Google-managed configurations and + user-managed configurations. Returns: Callable[[~.ListInstanceConfigsRequest], @@ -300,7 +390,7 @@ def list_instance_configs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instance_configs" not in self._stubs: - self._stubs["list_instance_configs"] = self.grpc_channel.unary_unary( + self._stubs["list_instance_configs"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", request_serializer=spanner_instance_admin.ListInstanceConfigsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstanceConfigsResponse.deserialize, @@ -330,7 +420,7 @@ def get_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance_config" not in self._stubs: - self._stubs["get_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["get_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", request_serializer=spanner_instance_admin.GetInstanceConfigRequest.serialize, response_deserializer=spanner_instance_admin.InstanceConfig.deserialize, @@ -346,8 +436,7 @@ def create_instance_config( r"""Return a callable for the create instance config method over gRPC. Creates an instance configuration and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance configuration. The instance configuration name is assigned by the caller. If the named instance configuration already exists, @@ -374,14 +463,12 @@ def create_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track creation of the instance configuration. The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -400,7 +487,7 @@ def create_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_instance_config" not in self._stubs: - self._stubs["create_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["create_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig", request_serializer=spanner_instance_admin.CreateInstanceConfigRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -415,10 +502,10 @@ def update_instance_config( ]: r"""Return a callable for the update instance config method over gRPC. - Updates an instance configuration. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - configuration does not exist, returns ``NOT_FOUND``. + Updates an instance configuration. The returned long-running + operation can be used to track the progress of updating the + instance. If the named instance configuration does not exist, + returns ``NOT_FOUND``. Only user-managed configurations can be updated. @@ -450,15 +537,12 @@ def update_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track the instance configuration modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -477,7 +561,7 @@ def update_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance_config" not in self._stubs: - self._stubs["update_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["update_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig", request_serializer=spanner_instance_admin.UpdateInstanceConfigRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -513,7 +597,7 @@ def delete_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance_config" not in self._stubs: - self._stubs["delete_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig", request_serializer=spanner_instance_admin.DeleteInstanceConfigRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -530,12 +614,11 @@ def list_instance_config_operations( r"""Return a callable for the list instance config operations method over gRPC. - Lists the user-managed instance configuration [long-running - operations][google.longrunning.Operation] in the given project. - An instance configuration operation has a name of the form + Lists the user-managed instance configuration long-running + operations in the given project. An instance configuration + operation has a name of the form ``projects//instanceConfigs//operations/``. 
- The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -556,7 +639,7 @@ def list_instance_config_operations( if "list_instance_config_operations" not in self._stubs: self._stubs[ "list_instance_config_operations" - ] = self.grpc_channel.unary_unary( + ] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations", request_serializer=spanner_instance_admin.ListInstanceConfigOperationsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstanceConfigOperationsResponse.deserialize, @@ -585,7 +668,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", request_serializer=spanner_instance_admin.ListInstancesRequest.serialize, response_deserializer=spanner_instance_admin.ListInstancesResponse.deserialize, @@ -614,7 +697,7 @@ def list_instance_partitions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instance_partitions" not in self._stubs: - self._stubs["list_instance_partitions"] = self.grpc_channel.unary_unary( + self._stubs["list_instance_partitions"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions", request_serializer=spanner_instance_admin.ListInstancePartitionsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstancePartitionsResponse.deserialize, @@ -642,7 +725,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", request_serializer=spanner_instance_admin.GetInstanceRequest.serialize, response_deserializer=spanner_instance_admin.Instance.deserialize, @@ -658,9 +741,8 @@ def create_instance( r"""Return a callable for the create instance method over gRPC. Creates an instance and begins preparing it to begin serving. - The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance. The instance name is + The returned long-running operation can be used to track the + progress of preparing the new instance. The instance name is assigned by the caller. If the named instance already exists, ``CreateInstance`` returns ``ALREADY_EXISTS``. @@ -686,14 +768,13 @@ def create_instance( API. - The instance's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be - used to track creation of the instance. The - [metadata][google.longrunning.Operation.metadata] field type is + used to track creation of the instance. 
The metadata field type + is [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Returns: @@ -707,7 +788,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", request_serializer=spanner_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -723,10 +804,9 @@ def update_instance( r"""Return a callable for the update instance method over gRPC. Updates an instance, and begins allocating or releasing - resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - does not exist, returns ``NOT_FOUND``. + resources as requested. The returned long-running operation can + be used to track the progress of updating the instance. If the + named instance does not exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -754,14 +834,13 @@ def update_instance( instance's tables. - The instance's new resource levels are readable via the API. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be - used to track the instance modification. The - [metadata][google.longrunning.Operation.metadata] field type is + used to track the instance modification. The metadata field type + is [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Authorization requires ``spanner.instances.update`` permission @@ -779,7 +858,7 @@ def update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", request_serializer=spanner_instance_admin.UpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -815,7 +894,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", request_serializer=spanner_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -845,7 +924,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -876,7 +955,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -911,7 +990,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -941,7 +1020,7 @@ def get_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance_partition" not in self._stubs: - self._stubs["get_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["get_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition", request_serializer=spanner_instance_admin.GetInstancePartitionRequest.serialize, response_deserializer=spanner_instance_admin.InstancePartition.deserialize, @@ -958,8 +1037,7 @@ def create_instance_partition( r"""Return a callable for the create instance partition method over gRPC. Creates an instance partition and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance partition. The instance partition name is assigned by the caller. If the named instance partition already exists, ``CreateInstancePartition`` @@ -988,14 +1066,12 @@ def create_instance_partition( readable via the API. - The instance partition's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track creation of the instance partition. The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -1010,7 +1086,7 @@ def create_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "create_instance_partition" not in self._stubs: - self._stubs["create_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["create_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition", request_serializer=spanner_instance_admin.CreateInstancePartitionRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1044,7 +1120,7 @@ def delete_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance_partition" not in self._stubs: - self._stubs["delete_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition", request_serializer=spanner_instance_admin.DeleteInstancePartitionRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1061,10 +1137,10 @@ def update_instance_partition( r"""Return a callable for the update instance partition method over gRPC. Updates an instance partition, and begins allocating or - releasing resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance partition. If the named - instance partition does not exist, returns ``NOT_FOUND``. + releasing resources as requested. The returned long-running + operation can be used to track the progress of updating the + instance partition. If the named instance partition does not + exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -1094,15 +1170,12 @@ def update_instance_partition( - The instance partition's new resource levels are readable via the API. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track the instance partition modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -1121,7 +1194,7 @@ def update_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance_partition" not in self._stubs: - self._stubs["update_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["update_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition", request_serializer=spanner_instance_admin.UpdateInstancePartitionRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1138,12 +1211,10 @@ def list_instance_partition_operations( r"""Return a callable for the list instance partition operations method over gRPC. - Lists instance partition [long-running - operations][google.longrunning.Operation] in the given instance. - An instance partition operation has a name of the form + Lists instance partition long-running operations in the given + instance. An instance partition operation has a name of the form ``projects//instances//instancePartitions//operations/``. 
- The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -1169,7 +1240,7 @@ def list_instance_partition_operations( if "list_instance_partition_operations" not in self._stubs: self._stubs[ "list_instance_partition_operations" - ] = self.grpc_channel.unary_unary( + ] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations", request_serializer=spanner_instance_admin.ListInstancePartitionOperationsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstancePartitionOperationsResponse.deserialize, @@ -1185,9 +1256,8 @@ def move_instance( r"""Return a callable for the move instance method over gRPC. Moves an instance to the target instance configuration. You can - use the returned [long-running - operation][google.longrunning.Operation] to track the progress - of moving the instance. + use the returned long-running operation to track the progress of + moving the instance. ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance meets any of the following criteria: @@ -1220,14 +1290,12 @@ def move_instance( a higher transaction abort rate. However, moving an instance doesn't cause any downtime. - The returned [long-running - operation][google.longrunning.Operation] has a name of the - format ``<instance_name>/operations/<operation_id>`` and can be - used to track the move instance operation. The - [metadata][google.longrunning.Operation.metadata] field type is + The returned long-running operation has a name of the format + ``<instance_name>/operations/<operation_id>`` and can be used to + track the move instance operation. The metadata field type is [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation is not immediate because it involves moving any @@ -1262,7 +1330,7 @@ def move_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "move_instance" not in self._stubs: - self._stubs["move_instance"] = self.grpc_channel.unary_unary( + self._stubs["move_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance", request_serializer=spanner_instance_admin.MoveInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1270,7 +1338,7 @@ def move_instance( return self._stubs["move_instance"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py index c3a0cb107a..2b382a0085 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License.
# import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -24,8 +27,11 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin @@ -36,6 +42,82 @@ from .base import InstanceAdminTransport, DEFAULT_CLIENT_INFO from .grpc import InstanceAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert the gRPC trailing metadata into a dict of str key/value pairs + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class InstanceAdminGrpcAsyncIOTransport(InstanceAdminTransport): """gRPC AsyncIO backend transport for InstanceAdmin. @@ -255,10 +337,13 @@ def __init__( ], ) - # Wrap messages.
This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -281,7 +366,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -298,6 +383,8 @@ def list_instance_configs( Lists the supported instance configurations for a given project. + Returns both Google-managed configurations and + user-managed configurations. Returns: Callable[[~.ListInstanceConfigsRequest], @@ -310,7 +397,7 @@ def list_instance_configs( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instance_configs" not in self._stubs: - self._stubs["list_instance_configs"] = self.grpc_channel.unary_unary( + self._stubs["list_instance_configs"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigs", request_serializer=spanner_instance_admin.ListInstanceConfigsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstanceConfigsResponse.deserialize, @@ -340,7 +427,7 @@ def get_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance_config" not in self._stubs: - self._stubs["get_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["get_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstanceConfig", request_serializer=spanner_instance_admin.GetInstanceConfigRequest.serialize, response_deserializer=spanner_instance_admin.InstanceConfig.deserialize, @@ -357,8 +444,7 @@ def create_instance_config( r"""Return a callable for the create instance config method over gRPC. Creates an instance configuration and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance configuration. The instance configuration name is assigned by the caller. If the named instance configuration already exists, ``CreateInstanceConfig`` returns ``ALREADY_EXISTS``. @@ -385,14 +471,12 @@ def create_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. Its state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``<instance_config_name>/operations/<operation_id>`` and can be used to track creation of the instance configuration. The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstanceConfigMetadata][google.spanner.admin.instance.v1.CreateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful.
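The logging hookup above deserves a note: `_LoggingClientAIOInterceptor` only serializes and emits request/response payloads when `google.api_core.client_logging` is importable and the transport's logger is enabled for DEBUG, so it is effectively free otherwise. A minimal sketch of how an application could opt in with stdlib logging — the logger name follows the module path shown in this diff, while the handler and format choices are illustrative, not part of the library:

```python
import logging

# The interceptors check _LOGGER.isEnabledFor(DEBUG) before doing any work,
# so enabling DEBUG on this logger subtree is the opt-in switch.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s %(levelname)s %(message)s"))

admin_logger = logging.getLogger("google.cloud.spanner_admin_instance_v1")
admin_logger.setLevel(logging.DEBUG)
admin_logger.addHandler(handler)

# Subsequent InstanceAdmin RPCs made over the gRPC transports will emit
# "Sending request for ..." / "Received response to rpc ..." DEBUG records,
# with the JSON-serialized payload and metadata attached via `extra`.
```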
@@ -411,7 +495,7 @@ def create_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance_config" not in self._stubs: - self._stubs["create_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["create_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstanceConfig", request_serializer=spanner_instance_admin.CreateInstanceConfigRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -427,10 +511,10 @@ def update_instance_config( ]: r"""Return a callable for the update instance config method over gRPC. - Updates an instance configuration. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - configuration does not exist, returns ``NOT_FOUND``. + Updates an instance configuration. The returned long-running + operation can be used to track the progress of updating the + instance. If the named instance configuration does not exist, + returns ``NOT_FOUND``. Only user-managed configurations can be updated. @@ -462,15 +546,12 @@ def update_instance_config( [reconciling][google.spanner.admin.instance.v1.InstanceConfig.reconciling] field becomes false. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``<instance_config_name>/operations/<operation_id>`` and can be used to track the instance configuration modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstanceConfigMetadata][google.spanner.admin.instance.v1.UpdateInstanceConfigMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig], if successful. @@ -489,7 +570,7 @@ def update_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance_config" not in self._stubs: - self._stubs["update_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["update_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstanceConfig", request_serializer=spanner_instance_admin.UpdateInstanceConfigRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -525,7 +606,7 @@ def delete_instance_config( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance_config" not in self._stubs: - self._stubs["delete_instance_config"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance_config"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstanceConfig", request_serializer=spanner_instance_admin.DeleteInstanceConfigRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -542,12 +623,11 @@ def list_instance_config_operations( r"""Return a callable for the list instance config operations method over gRPC. - Lists the user-managed instance configuration [long-running - operations][google.longrunning.Operation] in the given project. - An instance configuration operation has a name of the form + Lists the user-managed instance configuration long-running + operations in the given project.
An instance configuration + operation has a name of the form ``projects/<project>/instanceConfigs/<instance_config>/operations/<operation>``. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -568,7 +648,7 @@ def list_instance_config_operations( if "list_instance_config_operations" not in self._stubs: self._stubs[ "list_instance_config_operations" - ] = self.grpc_channel.unary_unary( + ] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstanceConfigOperations", request_serializer=spanner_instance_admin.ListInstanceConfigOperationsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstanceConfigOperationsResponse.deserialize, @@ -597,7 +677,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstances", request_serializer=spanner_instance_admin.ListInstancesRequest.serialize, response_deserializer=spanner_instance_admin.ListInstancesResponse.deserialize, @@ -626,7 +706,7 @@ def list_instance_partitions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instance_partitions" not in self._stubs: - self._stubs["list_instance_partitions"] = self.grpc_channel.unary_unary( + self._stubs["list_instance_partitions"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitions", request_serializer=spanner_instance_admin.ListInstancePartitionsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstancePartitionsResponse.deserialize, @@ -655,7 +735,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstance", request_serializer=spanner_instance_admin.GetInstanceRequest.serialize, response_deserializer=spanner_instance_admin.Instance.deserialize, @@ -672,9 +752,8 @@ def create_instance( r"""Return a callable for the create instance method over gRPC. Creates an instance and begins preparing it to begin serving. - The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of preparing the new instance. The instance name is + The returned long-running operation can be used to track the + progress of preparing the new instance. The instance name is assigned by the caller. If the named instance already exists, ``CreateInstance`` returns ``ALREADY_EXISTS``. @@ -700,14 +779,13 @@ def create_instance( API. - The instance's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``<instance_name>/operations/<operation_id>`` and can be - used to track creation of the instance.
The - [metadata][google.longrunning.Operation.metadata] field type is + used to track creation of the instance. The metadata field type + is [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Returns: @@ -721,7 +799,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstance", request_serializer=spanner_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -738,10 +816,9 @@ def update_instance( r"""Return a callable for the update instance method over gRPC. Updates an instance, and begins allocating or releasing - resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance. If the named instance - does not exist, returns ``NOT_FOUND``. + resources as requested. The returned long-running operation can + be used to track the progress of updating the instance. If the + named instance does not exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -769,14 +846,13 @@ def update_instance( instance's tables. - The instance's new resource levels are readable via the API. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``<instance_name>/operations/<operation_id>`` and can be - used to track the instance modification. The - [metadata][google.longrunning.Operation.metadata] field type is + used to track the instance modification. The metadata field type + is [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Authorization requires ``spanner.instances.update`` permission @@ -794,7 +870,7 @@ def update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstance", request_serializer=spanner_instance_admin.UpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -832,7 +908,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each.
if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstance", request_serializer=spanner_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -862,7 +938,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -893,7 +969,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -928,7 +1004,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -958,7 +1034,7 @@ def get_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance_partition" not in self._stubs: - self._stubs["get_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["get_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/GetInstancePartition", request_serializer=spanner_instance_admin.GetInstancePartitionRequest.serialize, response_deserializer=spanner_instance_admin.InstancePartition.deserialize, @@ -975,8 +1051,7 @@ def create_instance_partition( r"""Return a callable for the create instance partition method over gRPC. Creates an instance partition and begins preparing it to be - used. The returned [long-running - operation][google.longrunning.Operation] can be used to track + used. The returned long-running operation can be used to track the progress of preparing the new instance partition. The instance partition name is assigned by the caller. If the named instance partition already exists, ``CreateInstancePartition`` @@ -1005,14 +1080,12 @@ def create_instance_partition( readable via the API. - The instance partition's state becomes ``READY``. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``/operations/`` and can be used to track creation of the instance partition. 
The - [metadata][google.longrunning.Operation.metadata] field type is + metadata field type is [CreateInstancePartitionMetadata][google.spanner.admin.instance.v1.CreateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -1027,7 +1100,7 @@ def create_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance_partition" not in self._stubs: - self._stubs["create_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["create_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/CreateInstancePartition", request_serializer=spanner_instance_admin.CreateInstancePartitionRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1062,7 +1135,7 @@ def delete_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance_partition" not in self._stubs: - self._stubs["delete_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/DeleteInstancePartition", request_serializer=spanner_instance_admin.DeleteInstancePartitionRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1079,10 +1152,10 @@ def update_instance_partition( r"""Return a callable for the update instance partition method over gRPC. Updates an instance partition, and begins allocating or - releasing resources as requested. The returned [long-running - operation][google.longrunning.Operation] can be used to track - the progress of updating the instance partition. If the named - instance partition does not exist, returns ``NOT_FOUND``. + releasing resources as requested. The returned long-running + operation can be used to track the progress of updating the + instance partition. If the named instance partition does not + exist, returns ``NOT_FOUND``. Immediately upon completion of this request: @@ -1112,15 +1185,12 @@ def update_instance_partition( - The instance partition's new resource levels are readable via the API. - The returned [long-running - operation][google.longrunning.Operation] will have a name of the + The returned long-running operation will have a name of the format ``<instance_partition_name>/operations/<operation_id>`` and can be used to track the instance partition modification. - The [metadata][google.longrunning.Operation.metadata] field type - is + The metadata field type is [UpdateInstancePartitionMetadata][google.spanner.admin.instance.v1.UpdateInstancePartitionMetadata]. - The [response][google.longrunning.Operation.response] field type - is + The response field type is [InstancePartition][google.spanner.admin.instance.v1.InstancePartition], if successful. @@ -1139,7 +1209,7 @@ def update_instance_partition( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each.
if "update_instance_partition" not in self._stubs: - self._stubs["update_instance_partition"] = self.grpc_channel.unary_unary( + self._stubs["update_instance_partition"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/UpdateInstancePartition", request_serializer=spanner_instance_admin.UpdateInstancePartitionRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1156,12 +1226,10 @@ def list_instance_partition_operations( r"""Return a callable for the list instance partition operations method over gRPC. - Lists instance partition [long-running - operations][google.longrunning.Operation] in the given instance. - An instance partition operation has a name of the form + Lists instance partition long-running operations in the given + instance. An instance partition operation has a name of the form ``projects//instances//instancePartitions//operations/``. - The long-running operation - [metadata][google.longrunning.Operation.metadata] field type + The long-running operation metadata field type ``metadata.type_url`` describes the type of the metadata. Operations returned include those that have completed/failed/canceled within the last 7 days, and pending @@ -1187,7 +1255,7 @@ def list_instance_partition_operations( if "list_instance_partition_operations" not in self._stubs: self._stubs[ "list_instance_partition_operations" - ] = self.grpc_channel.unary_unary( + ] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/ListInstancePartitionOperations", request_serializer=spanner_instance_admin.ListInstancePartitionOperationsRequest.serialize, response_deserializer=spanner_instance_admin.ListInstancePartitionOperationsResponse.deserialize, @@ -1204,9 +1272,8 @@ def move_instance( r"""Return a callable for the move instance method over gRPC. Moves an instance to the target instance configuration. You can - use the returned [long-running - operation][google.longrunning.Operation] to track the progress - of moving the instance. + use the returned long-running operation to track the progress of + moving the instance. ``MoveInstance`` returns ``FAILED_PRECONDITION`` if the instance meets any of the following criteria: @@ -1239,14 +1306,12 @@ def move_instance( a higher transaction abort rate. However, moving an instance doesn't cause any downtime. - The returned [long-running - operation][google.longrunning.Operation] has a name of the - format ``/operations/`` and can be - used to track the move instance operation. The - [metadata][google.longrunning.Operation.metadata] field type is + The returned long-running operation has a name of the format + ``/operations/`` and can be used to + track the move instance operation. The metadata field type is [MoveInstanceMetadata][google.spanner.admin.instance.v1.MoveInstanceMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Instance][google.spanner.admin.instance.v1.Instance], if + The response field type is + [Instance][google.spanner.admin.instance.v1.Instance], if successful. Cancelling the operation sets its metadata's [cancel_time][google.spanner.admin.instance.v1.MoveInstanceMetadata.cancel_time]. Cancellation is not immediate because it involves moving any @@ -1281,7 +1346,7 @@ def move_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "move_instance" not in self._stubs: - self._stubs["move_instance"] = self.grpc_channel.unary_unary( + self._stubs["move_instance"] = self._logged_channel.unary_unary( "/google.spanner.admin.instance.v1.InstanceAdmin/MoveInstance", request_serializer=spanner_instance_admin.MoveInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1464,7 +1529,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py index e982ec039e..a728491812 100644 --- a/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py +++ b/google/cloud/spanner_admin_instance_v1/services/instance_admin/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -47,6 +48,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -235,8 +244,11 @@ def post_update_instance_partition(self, response): def pre_create_instance( self, request: spanner_instance_admin.CreateInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_instance_admin.CreateInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.CreateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_instance Override in a subclass to manipulate the request or metadata @@ -249,18 +261,42 @@ def post_create_instance( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_instance` interceptor runs + before the `post_create_instance_with_metadata` interceptor. """ return response + def post_create_instance_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_create_instance_with_metadata` + interceptor in new development instead of the `post_create_instance` interceptor. + When both interceptors are used, this `post_create_instance_with_metadata` interceptor runs after the + `post_create_instance` interceptor. The (possibly modified) response returned by + `post_create_instance` will be passed to + `post_create_instance_with_metadata`. + """ + return response, metadata + def pre_create_instance_config( self, request: spanner_instance_admin.CreateInstanceConfigRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.CreateInstanceConfigRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.CreateInstanceConfigRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_instance_config @@ -274,18 +310,42 @@ def post_create_instance_config( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_instance_config - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_instance_config_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_instance_config` interceptor runs + before the `post_create_instance_config_with_metadata` interceptor. """ return response + def post_create_instance_config_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_instance_config + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_instance_config_with_metadata` + interceptor in new development instead of the `post_create_instance_config` interceptor. + When both interceptors are used, this `post_create_instance_config_with_metadata` interceptor runs after the + `post_create_instance_config` interceptor. The (possibly modified) response returned by + `post_create_instance_config` will be passed to + `post_create_instance_config_with_metadata`. + """ + return response, metadata + def pre_create_instance_partition( self, request: spanner_instance_admin.CreateInstancePartitionRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.CreateInstancePartitionRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.CreateInstancePartitionRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_instance_partition @@ -299,17 +359,43 @@ def post_create_instance_partition( ) -> operations_pb2.Operation: """Post-rpc interceptor for create_instance_partition - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_instance_partition_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_create_instance_partition` interceptor runs + before the `post_create_instance_partition_with_metadata` interceptor. 
""" return response + def post_create_instance_partition_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_instance_partition + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_create_instance_partition_with_metadata` + interceptor in new development instead of the `post_create_instance_partition` interceptor. + When both interceptors are used, this `post_create_instance_partition_with_metadata` interceptor runs after the + `post_create_instance_partition` interceptor. The (possibly modified) response returned by + `post_create_instance_partition` will be passed to + `post_create_instance_partition_with_metadata`. + """ + return response, metadata + def pre_delete_instance( self, request: spanner_instance_admin.DeleteInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_instance_admin.DeleteInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.DeleteInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_instance Override in a subclass to manipulate the request or metadata @@ -320,9 +406,10 @@ def pre_delete_instance( def pre_delete_instance_config( self, request: spanner_instance_admin.DeleteInstanceConfigRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.DeleteInstanceConfigRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.DeleteInstanceConfigRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_instance_config @@ -334,9 +421,10 @@ def pre_delete_instance_config( def pre_delete_instance_partition( self, request: spanner_instance_admin.DeleteInstancePartitionRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.DeleteInstancePartitionRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.DeleteInstancePartitionRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_instance_partition @@ -348,8 +436,10 @@ def pre_delete_instance_partition( def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -360,17 +450,43 @@ def pre_get_iam_policy( def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for get_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_iam_policy` interceptor runs + before the `post_get_iam_policy_with_metadata` interceptor. 
""" return response + def post_get_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_iam_policy_with_metadata` + interceptor in new development instead of the `post_get_iam_policy` interceptor. + When both interceptors are used, this `post_get_iam_policy_with_metadata` interceptor runs after the + `post_get_iam_policy` interceptor. The (possibly modified) response returned by + `post_get_iam_policy` will be passed to + `post_get_iam_policy_with_metadata`. + """ + return response, metadata + def pre_get_instance( self, request: spanner_instance_admin.GetInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_instance_admin.GetInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.GetInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_instance Override in a subclass to manipulate the request or metadata @@ -383,18 +499,44 @@ def post_get_instance( ) -> spanner_instance_admin.Instance: """Post-rpc interceptor for get_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_instance` interceptor runs + before the `post_get_instance_with_metadata` interceptor. """ return response + def post_get_instance_with_metadata( + self, + response: spanner_instance_admin.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.Instance, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_instance_with_metadata` + interceptor in new development instead of the `post_get_instance` interceptor. + When both interceptors are used, this `post_get_instance_with_metadata` interceptor runs after the + `post_get_instance` interceptor. The (possibly modified) response returned by + `post_get_instance` will be passed to + `post_get_instance_with_metadata`. + """ + return response, metadata + def pre_get_instance_config( self, request: spanner_instance_admin.GetInstanceConfigRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.GetInstanceConfigRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.GetInstanceConfigRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for get_instance_config @@ -408,18 +550,44 @@ def post_get_instance_config( ) -> spanner_instance_admin.InstanceConfig: """Post-rpc interceptor for get_instance_config - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_instance_config_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_instance_config` interceptor runs + before the `post_get_instance_config_with_metadata` interceptor. """ return response + def post_get_instance_config_with_metadata( + self, + response: spanner_instance_admin.InstanceConfig, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.InstanceConfig, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for get_instance_config + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_instance_config_with_metadata` + interceptor in new development instead of the `post_get_instance_config` interceptor. + When both interceptors are used, this `post_get_instance_config_with_metadata` interceptor runs after the + `post_get_instance_config` interceptor. The (possibly modified) response returned by + `post_get_instance_config` will be passed to + `post_get_instance_config_with_metadata`. + """ + return response, metadata + def pre_get_instance_partition( self, request: spanner_instance_admin.GetInstancePartitionRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.GetInstancePartitionRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.GetInstancePartitionRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for get_instance_partition @@ -433,19 +601,45 @@ def post_get_instance_partition( ) -> spanner_instance_admin.InstancePartition: """Post-rpc interceptor for get_instance_partition - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_instance_partition_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_get_instance_partition` interceptor runs + before the `post_get_instance_partition_with_metadata` interceptor. """ return response + def post_get_instance_partition_with_metadata( + self, + response: spanner_instance_admin.InstancePartition, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.InstancePartition, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for get_instance_partition + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_get_instance_partition_with_metadata` + interceptor in new development instead of the `post_get_instance_partition` interceptor. + When both interceptors are used, this `post_get_instance_partition_with_metadata` interceptor runs after the + `post_get_instance_partition` interceptor. The (possibly modified) response returned by + `post_get_instance_partition` will be passed to + `post_get_instance_partition_with_metadata`. 
+ """ + return response, metadata + def pre_list_instance_config_operations( self, request: spanner_instance_admin.ListInstanceConfigOperationsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ spanner_instance_admin.ListInstanceConfigOperationsRequest, - Sequence[Tuple[str, str]], + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_instance_config_operations @@ -459,18 +653,45 @@ def post_list_instance_config_operations( ) -> spanner_instance_admin.ListInstanceConfigOperationsResponse: """Post-rpc interceptor for list_instance_config_operations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_instance_config_operations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_instance_config_operations` interceptor runs + before the `post_list_instance_config_operations_with_metadata` interceptor. """ return response + def post_list_instance_config_operations_with_metadata( + self, + response: spanner_instance_admin.ListInstanceConfigOperationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.ListInstanceConfigOperationsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_instance_config_operations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_instance_config_operations_with_metadata` + interceptor in new development instead of the `post_list_instance_config_operations` interceptor. + When both interceptors are used, this `post_list_instance_config_operations_with_metadata` interceptor runs after the + `post_list_instance_config_operations` interceptor. The (possibly modified) response returned by + `post_list_instance_config_operations` will be passed to + `post_list_instance_config_operations_with_metadata`. + """ + return response, metadata + def pre_list_instance_configs( self, request: spanner_instance_admin.ListInstanceConfigsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.ListInstanceConfigsRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.ListInstanceConfigsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_instance_configs @@ -484,19 +705,45 @@ def post_list_instance_configs( ) -> spanner_instance_admin.ListInstanceConfigsResponse: """Post-rpc interceptor for list_instance_configs - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_instance_configs_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_instance_configs` interceptor runs + before the `post_list_instance_configs_with_metadata` interceptor. 
""" return response + def post_list_instance_configs_with_metadata( + self, + response: spanner_instance_admin.ListInstanceConfigsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.ListInstanceConfigsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_instance_configs + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_instance_configs_with_metadata` + interceptor in new development instead of the `post_list_instance_configs` interceptor. + When both interceptors are used, this `post_list_instance_configs_with_metadata` interceptor runs after the + `post_list_instance_configs` interceptor. The (possibly modified) response returned by + `post_list_instance_configs` will be passed to + `post_list_instance_configs_with_metadata`. + """ + return response, metadata + def pre_list_instance_partition_operations( self, request: spanner_instance_admin.ListInstancePartitionOperationsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ spanner_instance_admin.ListInstancePartitionOperationsRequest, - Sequence[Tuple[str, str]], + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_instance_partition_operations @@ -510,18 +757,45 @@ def post_list_instance_partition_operations( ) -> spanner_instance_admin.ListInstancePartitionOperationsResponse: """Post-rpc interceptor for list_instance_partition_operations - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_instance_partition_operations_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_instance_partition_operations` interceptor runs + before the `post_list_instance_partition_operations_with_metadata` interceptor. """ return response + def post_list_instance_partition_operations_with_metadata( + self, + response: spanner_instance_admin.ListInstancePartitionOperationsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.ListInstancePartitionOperationsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_instance_partition_operations + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_instance_partition_operations_with_metadata` + interceptor in new development instead of the `post_list_instance_partition_operations` interceptor. + When both interceptors are used, this `post_list_instance_partition_operations_with_metadata` interceptor runs after the + `post_list_instance_partition_operations` interceptor. The (possibly modified) response returned by + `post_list_instance_partition_operations` will be passed to + `post_list_instance_partition_operations_with_metadata`. 
+ """ + return response, metadata + def pre_list_instance_partitions( self, request: spanner_instance_admin.ListInstancePartitionsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.ListInstancePartitionsRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.ListInstancePartitionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_instance_partitions @@ -535,17 +809,46 @@ def post_list_instance_partitions( ) -> spanner_instance_admin.ListInstancePartitionsResponse: """Post-rpc interceptor for list_instance_partitions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_instance_partitions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_instance_partitions` interceptor runs + before the `post_list_instance_partitions_with_metadata` interceptor. """ return response + def post_list_instance_partitions_with_metadata( + self, + response: spanner_instance_admin.ListInstancePartitionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.ListInstancePartitionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_instance_partitions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_instance_partitions_with_metadata` + interceptor in new development instead of the `post_list_instance_partitions` interceptor. + When both interceptors are used, this `post_list_instance_partitions_with_metadata` interceptor runs after the + `post_list_instance_partitions` interceptor. The (possibly modified) response returned by + `post_list_instance_partitions` will be passed to + `post_list_instance_partitions_with_metadata`. + """ + return response, metadata + def pre_list_instances( self, request: spanner_instance_admin.ListInstancesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_instance_admin.ListInstancesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.ListInstancesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_instances Override in a subclass to manipulate the request or metadata @@ -558,17 +861,46 @@ def post_list_instances( ) -> spanner_instance_admin.ListInstancesResponse: """Post-rpc interceptor for list_instances - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_instances_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_list_instances` interceptor runs + before the `post_list_instances_with_metadata` interceptor. 
""" return response + def post_list_instances_with_metadata( + self, + response: spanner_instance_admin.ListInstancesResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.ListInstancesResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for list_instances + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_list_instances_with_metadata` + interceptor in new development instead of the `post_list_instances` interceptor. + When both interceptors are used, this `post_list_instances_with_metadata` interceptor runs after the + `post_list_instances` interceptor. The (possibly modified) response returned by + `post_list_instances` will be passed to + `post_list_instances_with_metadata`. + """ + return response, metadata + def pre_move_instance( self, request: spanner_instance_admin.MoveInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_instance_admin.MoveInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.MoveInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for move_instance Override in a subclass to manipulate the request or metadata @@ -581,17 +913,42 @@ def post_move_instance( ) -> operations_pb2.Operation: """Post-rpc interceptor for move_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_move_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_move_instance` interceptor runs + before the `post_move_instance_with_metadata` interceptor. """ return response + def post_move_instance_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for move_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_move_instance_with_metadata` + interceptor in new development instead of the `post_move_instance` interceptor. + When both interceptors are used, this `post_move_instance_with_metadata` interceptor runs after the + `post_move_instance` interceptor. The (possibly modified) response returned by + `post_move_instance` will be passed to + `post_move_instance_with_metadata`. + """ + return response, metadata + def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -602,17 +959,43 @@ def pre_set_iam_policy( def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: """Post-rpc interceptor for set_iam_policy - Override in a subclass to manipulate the response + DEPRECATED. 
Please use the `post_set_iam_policy_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_set_iam_policy` interceptor runs + before the `post_set_iam_policy_with_metadata` interceptor. """ return response + def post_set_iam_policy_with_metadata( + self, + response: policy_pb2.Policy, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[policy_pb2.Policy, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_set_iam_policy_with_metadata` + interceptor in new development instead of the `post_set_iam_policy` interceptor. + When both interceptors are used, this `post_set_iam_policy_with_metadata` interceptor runs after the + `post_set_iam_policy` interceptor. The (possibly modified) response returned by + `post_set_iam_policy` will be passed to + `post_set_iam_policy_with_metadata`. + """ + return response, metadata + def pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -625,17 +1008,46 @@ def post_test_iam_permissions( ) -> iam_policy_pb2.TestIamPermissionsResponse: """Post-rpc interceptor for test_iam_permissions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_test_iam_permissions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_test_iam_permissions` interceptor runs + before the `post_test_iam_permissions_with_metadata` interceptor. """ return response + def post_test_iam_permissions_with_metadata( + self, + response: iam_policy_pb2.TestIamPermissionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsResponse, + Sequence[Tuple[str, Union[str, bytes]]], + ]: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_test_iam_permissions_with_metadata` + interceptor in new development instead of the `post_test_iam_permissions` interceptor. + When both interceptors are used, this `post_test_iam_permissions_with_metadata` interceptor runs after the + `post_test_iam_permissions` interceptor. The (possibly modified) response returned by + `post_test_iam_permissions` will be passed to + `post_test_iam_permissions_with_metadata`. 
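The widened `Union[str, bytes]` annotation applies to the pre-rpc hooks as well, which may rewrite request metadata before transcoding. A hedged sketch; the header key is made up for illustration:

```python
class TaggingInterceptor(InstanceAdminRestInterceptor):
    """Hypothetical subclass: rewrite request metadata before send."""

    def pre_set_iam_policy(self, request, metadata):
        # Append an extra header; values stay `str` unless the key ends
        # in "-bin" (see the metadata docstrings in this patch).
        metadata = list(metadata) + [("x-example-audit-tag", "iam-change")]
        return request, metadata
```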
+ """ + return response, metadata + def pre_update_instance( self, request: spanner_instance_admin.UpdateInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner_instance_admin.UpdateInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner_instance_admin.UpdateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for update_instance Override in a subclass to manipulate the request or metadata @@ -648,18 +1060,42 @@ def post_update_instance( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_instance - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_instance_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_instance` interceptor runs + before the `post_update_instance_with_metadata` interceptor. """ return response + def post_update_instance_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_instance + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_instance_with_metadata` + interceptor in new development instead of the `post_update_instance` interceptor. + When both interceptors are used, this `post_update_instance_with_metadata` interceptor runs after the + `post_update_instance` interceptor. The (possibly modified) response returned by + `post_update_instance` will be passed to + `post_update_instance_with_metadata`. + """ + return response, metadata + def pre_update_instance_config( self, request: spanner_instance_admin.UpdateInstanceConfigRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.UpdateInstanceConfigRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.UpdateInstanceConfigRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_instance_config @@ -673,18 +1109,42 @@ def post_update_instance_config( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_instance_config - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_instance_config_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_instance_config` interceptor runs + before the `post_update_instance_config_with_metadata` interceptor. """ return response + def post_update_instance_config_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_instance_config + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. 
+ + We recommend only using this `post_update_instance_config_with_metadata` + interceptor in new development instead of the `post_update_instance_config` interceptor. + When both interceptors are used, this `post_update_instance_config_with_metadata` interceptor runs after the + `post_update_instance_config` interceptor. The (possibly modified) response returned by + `post_update_instance_config` will be passed to + `post_update_instance_config_with_metadata`. + """ + return response, metadata + def pre_update_instance_partition( self, request: spanner_instance_admin.UpdateInstancePartitionRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - spanner_instance_admin.UpdateInstancePartitionRequest, Sequence[Tuple[str, str]] + spanner_instance_admin.UpdateInstancePartitionRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_instance_partition @@ -698,12 +1158,35 @@ def post_update_instance_partition( ) -> operations_pb2.Operation: """Post-rpc interceptor for update_instance_partition - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_update_instance_partition_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the InstanceAdmin server but before - it is returned to user code. + it is returned to user code. This `post_update_instance_partition` interceptor runs + before the `post_update_instance_partition_with_metadata` interceptor. """ return response + def post_update_instance_partition_with_metadata( + self, + response: operations_pb2.Operation, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[operations_pb2.Operation, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for update_instance_partition + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the InstanceAdmin server but before it is returned to user code. + + We recommend only using this `post_update_instance_partition_with_metadata` + interceptor in new development instead of the `post_update_instance_partition` interceptor. + When both interceptors are used, this `post_update_instance_partition_with_metadata` interceptor runs after the + `post_update_instance_partition` interceptor. The (possibly modified) response returned by + `post_update_instance_partition` will be passed to + `post_update_instance_partition_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class InstanceAdminRestStub: @@ -917,7 +1400,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create instance method over HTTP. @@ -928,8 +1411,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
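For context on how any of these interceptors get used at all: the REST transport accepts an interceptor instance at construction time, and the client accepts the transport. A sketch assuming the `MetadataAwareInterceptor` from the earlier example, ambient Application Default Credentials, and a placeholder project id:

```python
from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient
from google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.rest import (
    InstanceAdminRestTransport,
)

# Hand the interceptor to the REST transport, then the transport to the client.
transport = InstanceAdminRestTransport(interceptor=MetadataAwareInterceptor())
client = InstanceAdminClient(transport=transport)

# Every list_instances call now flows through the pre/post hooks above.
for instance in client.list_instances(parent="projects/my-project"):
    print(instance.name)
```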
Returns: ~.operations_pb2.Operation: @@ -942,6 +1427,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseCreateInstance._get_http_options() ) + request, metadata = self._interceptor.pre_create_instance(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseCreateInstance._get_transcoded_request( http_options, request @@ -956,6 +1442,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.CreateInstance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "CreateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._CreateInstance._get_response( self._host, @@ -975,7 +1488,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.create_instance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "CreateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateInstanceConfig( @@ -1013,19 +1552,21 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create instance config method over HTTP. Args: request (~.spanner_instance_admin.CreateInstanceConfigRequest): The request object. The request for - [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest]. + [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
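The `_LOGGER.debug(...)` blocks added throughout this file fire only when the installed google-api-core supports client logging (`CLIENT_LOGGING_SUPPORTED`) and the module logger is enabled for DEBUG. One way to surface them with the stdlib alone, assuming the logger name follows this module's import path (the generator creates it from `__name__`):

```python
import logging

# Route records somewhere visible, then enable DEBUG on this module's logger.
logging.basicConfig(level=logging.INFO)
logging.getLogger(
    "google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.rest"
).setLevel(logging.DEBUG)

# Alternatively (assumption: google-api-core's client logging support),
# setting GOOGLE_SDK_PYTHON_LOGGING_SCOPE=google before process start
# enables these logs without touching logging config in code.
```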
Returns: ~.operations_pb2.Operation: @@ -1038,6 +1579,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseCreateInstanceConfig._get_http_options() ) + request, metadata = self._interceptor.pre_create_instance_config( request, metadata ) @@ -1054,6 +1596,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.CreateInstanceConfig", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "CreateInstanceConfig", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._CreateInstanceConfig._get_response( self._host, @@ -1073,7 +1642,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance_config(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_instance_config_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.create_instance_config", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "CreateInstanceConfig", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateInstancePartition( @@ -1112,7 +1707,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create instance partition method over HTTP. @@ -1123,8 +1718,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
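The new metadata docstring encodes the gRPC-style binary-header convention: values are `str` unless the key ends in `-bin`, in which case raw `bytes` are expected. A sketch of per-call metadata under that convention; the `-bin` key and the `request` object are illustrative:

```python
# `request` is assumed to be a CreateInstancePartitionRequest built elsewhere.
metadata = [
    ("x-goog-request-params", "parent=projects/my-project"),  # str value
    ("x-example-trace-bin", b"\x0a\x02\x08\x01"),  # bytes value, "-bin" key
]
operation = client.create_instance_partition(request=request, metadata=metadata)
```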
Returns: ~.operations_pb2.Operation: @@ -1137,6 +1734,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseCreateInstancePartition._get_http_options() ) + request, metadata = self._interceptor.pre_create_instance_partition( request, metadata ) @@ -1153,6 +1751,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.CreateInstancePartition", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "CreateInstancePartition", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( InstanceAdminRestTransport._CreateInstancePartition._get_response( @@ -1174,7 +1799,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance_partition(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_instance_partition_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.create_instance_partition", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "CreateInstancePartition", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteInstance( @@ -1211,7 +1862,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete instance method over HTTP. @@ -1222,13 +1873,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseInstanceAdminRestTransport._BaseDeleteInstance._get_http_options() ) + request, metadata = self._interceptor.pre_delete_instance(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseDeleteInstance._get_transcoded_request( http_options, request @@ -1239,6 +1893,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.DeleteInstance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "DeleteInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._DeleteInstance._get_response( self._host, @@ -1288,24 +1969,27 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete instance config method over HTTP. Args: request (~.spanner_instance_admin.DeleteInstanceConfigRequest): The request object. The request for - [DeleteInstanceConfigRequest][InstanceAdmin.DeleteInstanceConfigRequest]. + [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseInstanceAdminRestTransport._BaseDeleteInstanceConfig._get_http_options() ) + request, metadata = self._interceptor.pre_delete_instance_config( request, metadata ) @@ -1318,6 +2002,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.DeleteInstanceConfig", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "DeleteInstanceConfig", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._DeleteInstanceConfig._get_response( self._host, @@ -1368,7 +2079,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete instance partition method over HTTP. @@ -1379,13 +2090,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseInstanceAdminRestTransport._BaseDeleteInstancePartition._get_http_options() ) + request, metadata = self._interceptor.pre_delete_instance_partition( request, metadata ) @@ -1398,6 +2112,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.DeleteInstancePartition", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "DeleteInstancePartition", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( InstanceAdminRestTransport._DeleteInstancePartition._get_response( @@ -1450,7 +2191,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -1460,8 +2201,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.policy_pb2.Policy: @@ -1546,6 +2289,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseGetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( http_options, request @@ -1560,6 +2304,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._GetIamPolicy._get_response( self._host, @@ -1581,7 +2352,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.get_iam_policy", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetInstance( @@ -1618,7 +2415,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.Instance: r"""Call the get instance method over HTTP. @@ -1629,8 +2426,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.spanner_instance_admin.Instance: @@ -1643,6 +2442,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseGetInstance._get_http_options() ) + request, metadata = self._interceptor.pre_get_instance(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseGetInstance._get_transcoded_request( http_options, request @@ -1655,6 +2455,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.GetInstance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._GetInstance._get_response( self._host, @@ -1675,7 +2502,33 @@ def __call__( pb_resp = spanner_instance_admin.Instance.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner_instance_admin.Instance.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.get_instance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetInstanceConfig( @@ -1712,7 +2565,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.InstanceConfig: r"""Call the get instance config method over HTTP. @@ -1723,8 +2576,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_instance_admin.InstanceConfig: @@ -1738,6 +2593,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseGetInstanceConfig._get_http_options() ) + request, metadata = self._interceptor.pre_get_instance_config( request, metadata ) @@ -1750,6 +2606,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.GetInstanceConfig", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetInstanceConfig", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._GetInstanceConfig._get_response( self._host, @@ -1770,7 +2653,35 @@ def __call__( pb_resp = spanner_instance_admin.InstanceConfig.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_instance_config(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_instance_config_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner_instance_admin.InstanceConfig.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.get_instance_config", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetInstanceConfig", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetInstancePartition( @@ -1807,7 +2718,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.InstancePartition: r"""Call the get instance partition method over HTTP. @@ -1818,8 +2729,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_instance_admin.InstancePartition: @@ -1832,6 +2745,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseGetInstancePartition._get_http_options() ) + request, metadata = self._interceptor.pre_get_instance_partition( request, metadata ) @@ -1844,6 +2758,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.GetInstancePartition", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetInstancePartition", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._GetInstancePartition._get_response( self._host, @@ -1864,7 +2805,35 @@ def __call__( pb_resp = spanner_instance_admin.InstancePartition.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_instance_partition(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_instance_partition_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner_instance_admin.InstancePartition.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.get_instance_partition", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "GetInstancePartition", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListInstanceConfigOperations( @@ -1902,7 +2871,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.ListInstanceConfigOperationsResponse: r"""Call the list instance config operations method over HTTP. @@ -1914,8 +2883,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_instance_admin.ListInstanceConfigOperationsResponse: @@ -1927,6 +2898,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseListInstanceConfigOperations._get_http_options() ) + request, metadata = self._interceptor.pre_list_instance_config_operations( request, metadata ) @@ -1939,6 +2911,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.ListInstanceConfigOperations", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstanceConfigOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( InstanceAdminRestTransport._ListInstanceConfigOperations._get_response( @@ -1963,7 +2962,38 @@ def __call__( ) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instance_config_operations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_list_instance_config_operations_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner_instance_admin.ListInstanceConfigOperationsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.list_instance_config_operations", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstanceConfigOperations", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListInstanceConfigs( @@ -2000,7 +3030,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.ListInstanceConfigsResponse: r"""Call the list instance configs method over HTTP. @@ -2011,8 +3041,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_instance_admin.ListInstanceConfigsResponse: @@ -2024,6 +3056,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseListInstanceConfigs._get_http_options() ) + request, metadata = self._interceptor.pre_list_instance_configs( request, metadata ) @@ -2036,6 +3069,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.ListInstanceConfigs", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstanceConfigs", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._ListInstanceConfigs._get_response( self._host, @@ -2056,7 +3116,37 @@ def __call__( pb_resp = spanner_instance_admin.ListInstanceConfigsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instance_configs(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_instance_configs_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_instance_admin.ListInstanceConfigsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.list_instance_configs", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstanceConfigs", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListInstancePartitionOperations( @@ -2094,7 +3184,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.ListInstancePartitionOperationsResponse: r"""Call the list instance partition operations method over HTTP. @@ -2106,8 +3196,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_instance_admin.ListInstancePartitionOperationsResponse: @@ -2119,6 +3211,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseListInstancePartitionOperations._get_http_options() ) + ( request, metadata, @@ -2134,6 +3227,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.ListInstancePartitionOperations", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstancePartitionOperations", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._ListInstancePartitionOperations._get_response( self._host, @@ -2156,7 +3276,38 @@ def __call__( ) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instance_partition_operations(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + ( + resp, + _, + ) = self._interceptor.post_list_instance_partition_operations_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner_instance_admin.ListInstancePartitionOperationsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.list_instance_partition_operations", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstancePartitionOperations", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListInstancePartitions( @@ -2194,7 +3345,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.ListInstancePartitionsResponse: r"""Call the list instance partitions method over HTTP. @@ -2205,8 +3356,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_instance_admin.ListInstancePartitionsResponse: @@ -2218,6 +3371,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseListInstancePartitions._get_http_options() ) + request, metadata = self._interceptor.pre_list_instance_partitions( request, metadata ) @@ -2230,6 +3384,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.ListInstancePartitions", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstancePartitions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._ListInstancePartitions._get_response( self._host, @@ -2250,7 +3431,37 @@ def __call__( pb_resp = spanner_instance_admin.ListInstancePartitionsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instance_partitions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_instance_partitions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_instance_admin.ListInstancePartitionsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.list_instance_partitions", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstancePartitions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListInstances( @@ -2287,7 +3498,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner_instance_admin.ListInstancesResponse: r"""Call the list instances method over HTTP. @@ -2298,8 +3509,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner_instance_admin.ListInstancesResponse: @@ -2311,6 +3524,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseListInstances._get_http_options() ) + request, metadata = self._interceptor.pre_list_instances(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseListInstances._get_transcoded_request( http_options, request @@ -2321,6 +3535,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.ListInstances", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstances", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._ListInstances._get_response( self._host, @@ -2341,7 +3582,35 @@ def __call__( pb_resp = spanner_instance_admin.ListInstancesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instances(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_instances_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + spanner_instance_admin.ListInstancesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.list_instances", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "ListInstances", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _MoveInstance( @@ -2379,7 +3648,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the move instance method over HTTP. @@ -2390,8 +3659,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
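Besides logging, the response path now invokes a second interceptor hook, `post_<rpc>_with_metadata`, which also receives the HTTP response headers. A sketch of hooking it, following the pattern this diff adds; the subclass and its behavior are illustrative:

```python
from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient
from google.cloud.spanner_admin_instance_v1.services.instance_admin.transports import (
    InstanceAdminRestTransport,
)
from google.cloud.spanner_admin_instance_v1.services.instance_admin.transports.rest import (
    InstanceAdminRestInterceptor,
)


class HeaderPeekInterceptor(InstanceAdminRestInterceptor):
    def post_list_instances_with_metadata(self, response, metadata):
        # `metadata` is the [(header, value), ...] list built from the
        # HTTP response; return both so the call proceeds unchanged.
        print("server headers:", dict(metadata))
        return response, metadata


transport = InstanceAdminRestTransport(interceptor=HeaderPeekInterceptor())
client = InstanceAdminClient(transport=transport)
```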
Returns: ~.operations_pb2.Operation: @@ -2404,6 +3675,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseMoveInstance._get_http_options() ) + request, metadata = self._interceptor.pre_move_instance(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseMoveInstance._get_transcoded_request( http_options, request @@ -2418,6 +3690,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.MoveInstance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "MoveInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._MoveInstance._get_response( self._host, @@ -2437,7 +3736,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_move_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_move_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.move_instance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "MoveInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SetIamPolicy( @@ -2475,7 +3800,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -2485,8 +3810,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -2571,6 +3898,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseSetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( http_options, request @@ -2585,6 +3913,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._SetIamPolicy._get_response( self._host, @@ -2606,7 +3961,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_set_iam_policy_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.set_iam_policy", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _TestIamPermissions( @@ -2644,7 +4025,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Call the test iam permissions method over HTTP. @@ -2654,8 +4035,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: @@ -2665,6 +4048,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseTestIamPermissions._get_http_options() ) + request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) @@ -2681,6 +4065,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._TestIamPermissions._get_response( self._host, @@ -2702,7 +4113,33 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_test_iam_permissions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateInstance( @@ -2740,7 +4177,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update instance method over HTTP. @@ -2751,8 +4188,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2765,6 +4204,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseUpdateInstance._get_http_options() ) + request, metadata = self._interceptor.pre_update_instance(request, metadata) transcoded_request = _BaseInstanceAdminRestTransport._BaseUpdateInstance._get_transcoded_request( http_options, request @@ -2779,6 +4219,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.UpdateInstance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "UpdateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._UpdateInstance._get_response( self._host, @@ -2798,7 +4265,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_instance(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_instance_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.update_instance", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "UpdateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateInstanceConfig( @@ -2836,19 +4329,21 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update instance config method over HTTP. Args: request (~.spanner_instance_admin.UpdateInstanceConfigRequest): The request object. The request for - [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest]. + [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2861,6 +4356,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseUpdateInstanceConfig._get_http_options() ) + request, metadata = self._interceptor.pre_update_instance_config( request, metadata ) @@ -2877,6 +4373,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.UpdateInstanceConfig", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "UpdateInstanceConfig", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = InstanceAdminRestTransport._UpdateInstanceConfig._get_response( self._host, @@ -2896,7 +4419,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_instance_config(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_instance_config_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.update_instance_config", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "UpdateInstanceConfig", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateInstancePartition( @@ -2935,7 +4484,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update instance partition method over HTTP. @@ -2946,8 +4495,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2960,6 +4511,7 @@ def __call__( http_options = ( _BaseInstanceAdminRestTransport._BaseUpdateInstancePartition._get_http_options() ) + request, metadata = self._interceptor.pre_update_instance_partition( request, metadata ) @@ -2976,6 +4528,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner.admin.instance_v1.InstanceAdminClient.UpdateInstancePartition", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "UpdateInstancePartition", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( InstanceAdminRestTransport._UpdateInstancePartition._get_response( @@ -2997,7 +4576,33 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_instance_partition(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_update_instance_partition_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner.admin.instance_v1.InstanceAdminClient.update_instance_partition", + extra={ + "serviceName": "google.spanner.admin.instance.v1.InstanceAdmin", + "rpcName": "UpdateInstancePartition", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/google/cloud/spanner_admin_instance_v1/types/__init__.py b/google/cloud/spanner_admin_instance_v1/types/__init__.py index 46fa3b0711..38ba52abc3 100644 --- a/google/cloud/spanner_admin_instance_v1/types/__init__.py +++ b/google/cloud/spanner_admin_instance_v1/types/__init__.py @@ -29,6 +29,7 @@ DeleteInstanceConfigRequest, DeleteInstancePartitionRequest, DeleteInstanceRequest, + FreeInstanceMetadata, GetInstanceConfigRequest, GetInstancePartitionRequest, GetInstanceRequest, @@ -72,6 +73,7 @@ "DeleteInstanceConfigRequest", "DeleteInstancePartitionRequest", "DeleteInstanceRequest", + "FreeInstanceMetadata", "GetInstanceConfigRequest", "GetInstancePartitionRequest", "GetInstanceRequest", diff --git a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py index ce72053b27..01a6584f68 100644 --- a/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py +++ b/google/cloud/spanner_admin_instance_v1/types/spanner_instance_admin.py @@ -49,6 +49,7 @@ "DeleteInstanceRequest", "CreateInstanceMetadata", "UpdateInstanceMetadata", + "FreeInstanceMetadata", "CreateInstanceConfigMetadata", "UpdateInstanceConfigMetadata", "InstancePartition", @@ -74,7 +75,7 @@ class 
ReplicaInfo(proto.Message): Attributes: location (str): - The location of the serving resources, e.g. + The location of the serving resources, e.g., "us-central1". type_ (google.cloud.spanner_admin_instance_v1.types.ReplicaInfo.ReplicaType): The type of replica. @@ -161,20 +162,24 @@ class InstanceConfig(proto.Message): configuration is a Google-managed or user-managed configuration. replicas (MutableSequence[google.cloud.spanner_admin_instance_v1.types.ReplicaInfo]): - The geographic placement of nodes in this - instance configuration and their replication - properties. + The geographic placement of nodes in this instance + configuration and their replication properties. + + To create user-managed configurations, input ``replicas`` + must include all replicas in ``replicas`` of the + ``base_config`` and include one or more replicas in the + ``optional_replicas`` of the ``base_config``. optional_replicas (MutableSequence[google.cloud.spanner_admin_instance_v1.types.ReplicaInfo]): Output only. The available optional replicas - to choose from for user managed configurations. - Populated for Google managed configurations. + to choose from for user-managed configurations. + Populated for Google-managed configurations. base_config (str): Base configuration name, e.g. projects//instanceConfigs/nam3, based on which - this configuration is created. Only set for user managed + this configuration is created. Only set for user-managed configurations. ``base_config`` must refer to a - configuration of type GOOGLE_MANAGED in the same project as - this configuration. + configuration of type ``GOOGLE_MANAGED`` in the same project + as this configuration. labels (MutableMapping[str, str]): Cloud Labels are a flexible and lightweight mechanism for organizing cloud resources into groups that reflect a @@ -233,6 +238,16 @@ class InstanceConfig(proto.Message): state (google.cloud.spanner_admin_instance_v1.types.InstanceConfig.State): Output only. The current instance configuration state. Applicable only for ``USER_MANAGED`` configurations. + free_instance_availability (google.cloud.spanner_admin_instance_v1.types.InstanceConfig.FreeInstanceAvailability): + Output only. Describes whether free instances + are available to be created in this instance + configuration. + quorum_type (google.cloud.spanner_admin_instance_v1.types.InstanceConfig.QuorumType): + Output only. The ``QuorumType`` of the instance + configuration. + storage_limit_per_processing_unit (int): + Output only. The storage limit in bytes per + processing unit. """ class Type(proto.Enum): @@ -242,9 +257,9 @@ class Type(proto.Enum): TYPE_UNSPECIFIED (0): Unspecified. GOOGLE_MANAGED (1): - Google managed configuration. + Google-managed configuration. USER_MANAGED (2): - User managed configuration. + User-managed configuration. """ TYPE_UNSPECIFIED = 0 GOOGLE_MANAGED = 1 @@ -267,6 +282,62 @@ class State(proto.Enum): CREATING = 1 READY = 2 + class FreeInstanceAvailability(proto.Enum): + r"""Describes the availability for free instances to be created + in an instance configuration. + + Values: + FREE_INSTANCE_AVAILABILITY_UNSPECIFIED (0): + Not specified. + AVAILABLE (1): + Indicates that free instances are available + to be created in this instance configuration. + UNSUPPORTED (2): + Indicates that free instances are not + supported in this instance configuration. + DISABLED (3): + Indicates that free instances are currently + not available to be created in this instance + configuration. 
+ QUOTA_EXCEEDED (4): + Indicates that additional free instances + cannot be created in this instance configuration + because the project has reached its limit of + free instances. + """ + FREE_INSTANCE_AVAILABILITY_UNSPECIFIED = 0 + AVAILABLE = 1 + UNSUPPORTED = 2 + DISABLED = 3 + QUOTA_EXCEEDED = 4 + + class QuorumType(proto.Enum): + r"""Indicates the quorum type of this instance configuration. + + Values: + QUORUM_TYPE_UNSPECIFIED (0): + Quorum type not specified. + REGION (1): + An instance configuration tagged with ``REGION`` quorum type + forms a write quorum in a single region. + DUAL_REGION (2): + An instance configuration tagged with the ``DUAL_REGION`` + quorum type forms a write quorum with exactly two read-write + regions in a multi-region configuration. + + This instance configuration requires failover in the event + of regional failures. + MULTI_REGION (3): + An instance configuration tagged with the ``MULTI_REGION`` + quorum type forms a write quorum from replicas that are + spread across more than one region in a multi-region + configuration. + """ + QUORUM_TYPE_UNSPECIFIED = 0 + REGION = 1 + DUAL_REGION = 2 + MULTI_REGION = 3 + name: str = proto.Field( proto.STRING, number=1, @@ -316,6 +387,20 @@ class State(proto.Enum): number=11, enum=State, ) + free_instance_availability: FreeInstanceAvailability = proto.Field( + proto.ENUM, + number=12, + enum=FreeInstanceAvailability, + ) + quorum_type: QuorumType = proto.Field( + proto.ENUM, + number=18, + enum=QuorumType, + ) + storage_limit_per_processing_unit: int = proto.Field( + proto.INT64, + number=19, + ) class ReplicaComputeCapacity(proto.Message): @@ -467,7 +552,7 @@ class AutoscalingTargets(proto.Message): Required. The target storage utilization percentage that the autoscaler should be trying to achieve for the instance. This number is on a scale from 0 (no utilization) to 100 - (full utilization). The valid range is [10, 100] inclusive. + (full utilization). The valid range is [10, 99] inclusive. """ high_priority_cpu_utilization_percent: int = proto.Field( @@ -591,11 +676,6 @@ class Instance(proto.Message): This might be zero in API responses for instances that are not yet in the ``READY`` state. - If the instance has varying node count across replicas - (achieved by setting asymmetric_autoscaling_options in - autoscaling config), the node_count here is the maximum node - count across all replicas. - For more information, see `Compute capacity, nodes, and processing units `__. @@ -614,11 +694,6 @@ class Instance(proto.Message): This might be zero in API responses for instances that are not yet in the ``READY`` state. - If the instance has varying processing units per replica - (achieved by setting asymmetric_autoscaling_options in - autoscaling config), the processing_units here is the - maximum processing units across all replicas. - For more information, see `Compute capacity, nodes and processing units `__. @@ -669,6 +744,8 @@ class Instance(proto.Message): being disallowed. For example, representing labels as the string: name + "*" + value would prove problematic if we were to allow "*" in a future release. + instance_type (google.cloud.spanner_admin_instance_v1.types.Instance.InstanceType): + The ``InstanceType`` of the current instance. endpoint_uris (MutableSequence[str]): Deprecated. This field is not populated. create_time (google.protobuf.timestamp_pb2.Timestamp): @@ -677,20 +754,25 @@ class Instance(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. 
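With the three new output-only fields wired into `InstanceConfig`, a fetched configuration can be inspected for free-instance support and quorum shape. A small sketch; the project and configuration names are placeholders:

```python
from google.cloud import spanner_admin_instance_v1

InstanceConfig = spanner_admin_instance_v1.InstanceConfig
client = spanner_admin_instance_v1.InstanceAdminClient()

config = client.get_instance_config(
    name="projects/my-project/instanceConfigs/nam3"
)
if config.free_instance_availability == InstanceConfig.FreeInstanceAvailability.AVAILABLE:
    print("free instances can be created in this configuration")
print("quorum type:", config.quorum_type.name)
print("storage limit per processing unit (bytes):", config.storage_limit_per_processing_unit)
```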
The time at which the instance was most recently updated. + free_instance_metadata (google.cloud.spanner_admin_instance_v1.types.FreeInstanceMetadata): + Free instance metadata. Only populated for + free instances. edition (google.cloud.spanner_admin_instance_v1.types.Instance.Edition): Optional. The ``Edition`` of the current instance. default_backup_schedule_type (google.cloud.spanner_admin_instance_v1.types.Instance.DefaultBackupScheduleType): - Optional. Controls the default backup behavior for new - databases within the instance. + Optional. Controls the default backup schedule behavior for + new databases within the instance. By default, a backup + schedule is created automatically when a new database is + created in a new instance. - Note that ``AUTOMATIC`` is not permitted for free instances, - as backups and backup schedules are not allowed for free - instances. + Note that the ``AUTOMATIC`` value isn't permitted for free + instances, as backups and backup schedules aren't supported + for free instances. In the ``GetInstance`` or ``ListInstances`` response, if the - value of default_backup_schedule_type is unset or NONE, no - default backup schedule will be created for new databases - within the instance. + value of ``default_backup_schedule_type`` isn't set, or set + to ``NONE``, Spanner doesn't create a default backup + schedule for new databases in the instance. """ class State(proto.Enum): @@ -712,6 +794,27 @@ class State(proto.Enum): CREATING = 1 READY = 2 + class InstanceType(proto.Enum): + r"""The type of this instance. The type can be used to distinguish + product variants, that can affect aspects like: usage restrictions, + quotas and billing. Currently this is used to distinguish + FREE_INSTANCE vs PROVISIONED instances. + + Values: + INSTANCE_TYPE_UNSPECIFIED (0): + Not specified. + PROVISIONED (1): + Provisioned instances have dedicated + resources, standard usage limits and support. + FREE_INSTANCE (2): + Free instances provide no guarantee for dedicated resources, + [node_count, processing_units] should be 0. They come with + stricter usage limits and limited support. + """ + INSTANCE_TYPE_UNSPECIFIED = 0 + PROVISIONED = 1 + FREE_INSTANCE = 2 + class Edition(proto.Enum): r"""The edition selected for this instance. Different editions provide different capabilities at different price points. @@ -732,25 +835,25 @@ class Edition(proto.Enum): ENTERPRISE_PLUS = 3 class DefaultBackupScheduleType(proto.Enum): - r"""Indicates the default backup behavior for new databases - within the instance. + r"""Indicates the `default backup + schedule `__ + behavior for new databases within the instance. Values: DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED (0): Not specified. NONE (1): - No default backup schedule will be created - automatically on creation of a database within + A default backup schedule isn't created + automatically when a new database is created in the instance. AUTOMATIC (2): - A default backup schedule will be created - automatically on creation of a database within + A default backup schedule is created + automatically when a new database is created in the instance. The default backup schedule - creates a full backup every 24 hours and retains - the backup for a period of 7 days. Once created, - the default backup schedule can be - edited/deleted similar to any other backup - schedule. + creates a full backup every 24 hours. These full + backups are retained for 7 days. You can edit or + delete the default backup schedule once it's + created. 
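`Instance` gains `instance_type`, `free_instance_metadata`, and the reworded `default_backup_schedule_type` semantics. A sketch that tells free instances apart from provisioned ones and opts new databases out of the automatic backup schedule; the names are placeholders, and passing `field_mask` as a dict relies on proto-plus coercion:

```python
from google.cloud import spanner_admin_instance_v1

Instance = spanner_admin_instance_v1.Instance
client = spanner_admin_instance_v1.InstanceAdminClient()

instance = client.get_instance(name="projects/my-project/instances/my-instance")
if instance.instance_type == Instance.InstanceType.FREE_INSTANCE:
    print("free instance, expires at:", instance.free_instance_metadata.expire_time)

# Stop Spanner from creating a default backup schedule for new databases.
instance.default_backup_schedule_type = Instance.DefaultBackupScheduleType.NONE
operation = client.update_instance(
    instance=instance,
    field_mask={"paths": ["default_backup_schedule_type"]},
)
operation.result()
```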
""" DEFAULT_BACKUP_SCHEDULE_TYPE_UNSPECIFIED = 0 NONE = 1 @@ -798,6 +901,11 @@ class DefaultBackupScheduleType(proto.Enum): proto.STRING, number=7, ) + instance_type: InstanceType = proto.Field( + proto.ENUM, + number=10, + enum=InstanceType, + ) endpoint_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=8, @@ -812,6 +920,11 @@ class DefaultBackupScheduleType(proto.Enum): number=12, message=timestamp_pb2.Timestamp, ) + free_instance_metadata: "FreeInstanceMetadata" = proto.Field( + proto.MESSAGE, + number=13, + message="FreeInstanceMetadata", + ) edition: Edition = proto.Field( proto.ENUM, number=20, @@ -906,7 +1019,7 @@ class GetInstanceConfigRequest(proto.Message): class CreateInstanceConfigRequest(proto.Message): r"""The request for - [CreateInstanceConfigRequest][InstanceAdmin.CreateInstanceConfigRequest]. + [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. Attributes: parent (str): @@ -920,10 +1033,10 @@ class CreateInstanceConfigRequest(proto.Message): characters in length. The ``custom-`` prefix is required to avoid name conflicts with Google-managed configurations. instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): - Required. The InstanceConfig proto of the configuration to - create. instance_config.name must be + Required. The ``InstanceConfig`` proto of the configuration + to create. ``instance_config.name`` must be ``/instanceConfigs/``. - instance_config.base_config must be a Google managed + ``instance_config.base_config`` must be a Google-managed configuration name, e.g. /instanceConfigs/us-east1, /instanceConfigs/nam3. validate_only (bool): @@ -953,7 +1066,7 @@ class CreateInstanceConfigRequest(proto.Message): class UpdateInstanceConfigRequest(proto.Message): r"""The request for - [UpdateInstanceConfigRequest][InstanceAdmin.UpdateInstanceConfigRequest]. + [UpdateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstanceConfig]. Attributes: instance_config (google.cloud.spanner_admin_instance_v1.types.InstanceConfig): @@ -997,7 +1110,7 @@ class UpdateInstanceConfigRequest(proto.Message): class DeleteInstanceConfigRequest(proto.Message): r"""The request for - [DeleteInstanceConfigRequest][InstanceAdmin.DeleteInstanceConfigRequest]. + [DeleteInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.DeleteInstanceConfig]. Attributes: name (str): @@ -1053,8 +1166,7 @@ class ListInstanceConfigOperationsRequest(proto.Message): ``:``. Colon ``:`` is the contains operator. Filter rules are not case sensitive. - The following fields in the - [Operation][google.longrunning.Operation] are eligible for + The following fields in the Operation are eligible for filtering: - ``name`` - The name of the long-running operation @@ -1129,12 +1241,11 @@ class ListInstanceConfigOperationsResponse(proto.Message): Attributes: operations (MutableSequence[google.longrunning.operations_pb2.Operation]): - The list of matching instance configuration [long-running - operations][google.longrunning.Operation]. Each operation's - name will be prefixed by the name of the instance - configuration. The operation's - [metadata][google.longrunning.Operation.metadata] field type - ``metadata.type_url`` describes the type of the metadata. + The list of matching instance configuration long-running + operations. Each operation's name will be prefixed by the + name of the instance configuration. The operation's metadata + field type ``metadata.type_url`` describes the type of the + metadata. 
next_page_token (str): ``next_page_token`` can be sent in a subsequent [ListInstanceConfigOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigOperations] @@ -1474,6 +1585,65 @@ class UpdateInstanceMetadata(proto.Message): ) +class FreeInstanceMetadata(proto.Message): + r"""Free instance specific metadata that is kept even after an + instance has been upgraded for tracking purposes. + + Attributes: + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. Timestamp after which the + instance will either be upgraded or scheduled + for deletion after a grace period. + ExpireBehavior is used to choose between + upgrading or scheduling the free instance for + deletion. This timestamp is set during the + creation of a free instance. + upgrade_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. If present, the timestamp at + which the free instance was upgraded to a + provisioned instance. + expire_behavior (google.cloud.spanner_admin_instance_v1.types.FreeInstanceMetadata.ExpireBehavior): + Specifies the expiration behavior of a free instance. The + default of ExpireBehavior is ``REMOVE_AFTER_GRACE_PERIOD``. + This can be modified during or after creation, and before + expiration. + """ + + class ExpireBehavior(proto.Enum): + r"""Allows users to change behavior when a free instance expires. + + Values: + EXPIRE_BEHAVIOR_UNSPECIFIED (0): + Not specified. + FREE_TO_PROVISIONED (1): + When the free instance expires, upgrade the + instance to a provisioned instance. + REMOVE_AFTER_GRACE_PERIOD (2): + When the free instance expires, disable the + instance, and delete it after the grace period + passes if it has not been upgraded. + """ + EXPIRE_BEHAVIOR_UNSPECIFIED = 0 + FREE_TO_PROVISIONED = 1 + REMOVE_AFTER_GRACE_PERIOD = 2 + + expire_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + upgrade_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + expire_behavior: ExpireBehavior = proto.Field( + proto.ENUM, + number=3, + enum=ExpireBehavior, + ) + + class CreateInstanceConfigMetadata(proto.Message): r"""Metadata type for the operation returned by [CreateInstanceConfig][google.spanner.admin.instance.v1.InstanceAdmin.CreateInstanceConfig]. @@ -1576,7 +1746,7 @@ class InstancePartition(proto.Message): node_count (int): The number of nodes allocated to this instance partition. - Users can set the node_count field to specify the target + Users can set the ``node_count`` field to specify the target number of nodes allocated to the instance partition. This may be zero in API responses for instance partitions @@ -1587,12 +1757,12 @@ class InstancePartition(proto.Message): The number of processing units allocated to this instance partition. - Users can set the processing_units field to specify the + Users can set the ``processing_units`` field to specify the target number of processing units allocated to the instance partition. - This may be zero in API responses for instance partitions - that are not yet in state ``READY``. + This might be zero in API responses for instance partitions + that are not yet in the ``READY`` state. This field is a member of `oneof`_ ``compute_capacity``. state (google.cloud.spanner_admin_instance_v1.types.InstancePartition.State): @@ -1611,11 +1781,13 @@ class InstancePartition(proto.Message): existence of any referencing database prevents the instance partition from being deleted. 
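`expire_behavior` is the one field of `FreeInstanceMetadata` a caller can influence; setting it to `FREE_TO_PROVISIONED` makes expiry upgrade the instance instead of scheduling deletion. A sketch of supplying it at creation time; whether free instances are actually available depends on the chosen configuration, so treat this as illustrative:

```python
from google.cloud import spanner_admin_instance_v1 as admin

client = admin.InstanceAdminClient()

instance = admin.Instance(
    name="projects/my-project/instances/my-free-instance",
    config="projects/my-project/instanceConfigs/regional-us-central1",
    display_name="my free instance",
    instance_type=admin.Instance.InstanceType.FREE_INSTANCE,
    free_instance_metadata=admin.FreeInstanceMetadata(
        expire_behavior=admin.FreeInstanceMetadata.ExpireBehavior.FREE_TO_PROVISIONED,
    ),
)
operation = client.create_instance(
    parent="projects/my-project",
    instance_id="my-free-instance",
    instance=instance,
)
operation.result()
```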
referencing_backups (MutableSequence[str]): - Output only. The names of the backups that - reference this instance partition. Referencing - backups should share the parent instance. The - existence of any referencing backup prevents the - instance partition from being deleted. + Output only. Deprecated: This field is not + populated. Output only. The names of the backups + that reference this instance partition. + Referencing backups should share the parent + instance. The existence of any referencing + backup prevents the instance partition from + being deleted. etag (str): Used for optimistic concurrency control as a way to help prevent simultaneous updates of a @@ -1912,7 +2084,10 @@ class ListInstancePartitionsRequest(proto.Message): parent (str): Required. The instance whose instance partitions should be listed. Values are of the form - ``projects//instances/``. + ``projects//instances/``. Use + ``{instance} = '-'`` to list instance partitions for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. page_size (int): Number of instance partitions to be returned in the response. If 0 or less, defaults to the @@ -1962,9 +2137,9 @@ class ListInstancePartitionsResponse(proto.Message): [ListInstancePartitions][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitions] call to fetch more of the matching instance partitions. unreachable (MutableSequence[str]): - The list of unreachable instance partitions. It includes the - names of instance partitions whose metadata could not be - retrieved within + The list of unreachable instances or instance partitions. It + includes the names of instances or instance partitions whose + metadata could not be retrieved within [instance_partition_deadline][google.spanner.admin.instance.v1.ListInstancePartitionsRequest.instance_partition_deadline]. """ @@ -2007,8 +2182,7 @@ class ListInstancePartitionOperationsRequest(proto.Message): ``:``. Colon ``:`` is the contains operator. Filter rules are not case sensitive. - The following fields in the - [Operation][google.longrunning.Operation] are eligible for + The following fields in the Operation are eligible for filtering: - ``name`` - The name of the long-running operation @@ -2062,7 +2236,7 @@ class ListInstancePartitionOperationsRequest(proto.Message): instance partition operations. Instance partitions whose operation metadata cannot be retrieved within this deadline will be added to - [unreachable][ListInstancePartitionOperationsResponse.unreachable] + [unreachable_instance_partitions][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse.unreachable_instance_partitions] in [ListInstancePartitionOperationsResponse][google.spanner.admin.instance.v1.ListInstancePartitionOperationsResponse]. """ @@ -2096,12 +2270,11 @@ class ListInstancePartitionOperationsResponse(proto.Message): Attributes: operations (MutableSequence[google.longrunning.operations_pb2.Operation]): - The list of matching instance partition [long-running - operations][google.longrunning.Operation]. Each operation's - name will be prefixed by the instance partition's name. The - operation's - [metadata][google.longrunning.Operation.metadata] field type - ``metadata.type_url`` describes the type of the metadata. + The list of matching instance partition long-running + operations. Each operation's name will be prefixed by the + instance partition's name. The operation's metadata field + type ``metadata.type_url`` describes the type of the + metadata. 
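Per the updated request documentation, `{instance} = '-'` turns `ListInstancePartitions` into a project-wide listing, with unreachable resources reported on the response instead of failing the call. A sketch:

```python
from google.cloud import spanner_admin_instance_v1

client = spanner_admin_instance_v1.InstanceAdminClient()

pager = client.list_instance_partitions(
    parent="projects/my-project/instances/-"  # '-' means all instances
)
for page in pager.pages:
    for partition in page.instance_partitions:
        print(partition.name, partition.state)
    for name in page.unreachable:
        print("metadata unavailable for:", name)
```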
next_page_token (str): ``next_page_token`` can be sent in a subsequent [ListInstancePartitionOperations][google.spanner.admin.instance.v1.InstanceAdmin.ListInstancePartitionOperations] diff --git a/google/cloud/spanner_dbapi/batch_dml_executor.py b/google/cloud/spanner_dbapi/batch_dml_executor.py index 7c4272a0ca..5c4e2495bb 100644 --- a/google/cloud/spanner_dbapi/batch_dml_executor.py +++ b/google/cloud/spanner_dbapi/batch_dml_executor.py @@ -87,7 +87,9 @@ def run_batch_dml(cursor: "Cursor", statements: List[Statement]): for statement in statements: statements_tuple.append(statement.get_tuple()) if not connection._client_transaction_started: - res = connection.database.run_in_transaction(_do_batch_update, statements_tuple) + res = connection.database.run_in_transaction( + _do_batch_update_autocommit, statements_tuple + ) many_result_set.add_iter(res) cursor._row_count = sum([max(val, 0) for val in res]) else: @@ -113,10 +115,10 @@ def run_batch_dml(cursor: "Cursor", statements: List[Statement]): connection._transaction_helper.retry_transaction() -def _do_batch_update(transaction, statements): +def _do_batch_update_autocommit(transaction, statements): from google.cloud.spanner_dbapi import OperationalError - status, res = transaction.batch_update(statements) + status, res = transaction.batch_update(statements, last_statement=True) if status.code == ABORTED: raise Aborted(status.message) elif status.code != OK: diff --git a/google/cloud/spanner_dbapi/cursor.py b/google/cloud/spanner_dbapi/cursor.py index a72a8e9de1..5c1539e7fc 100644 --- a/google/cloud/spanner_dbapi/cursor.py +++ b/google/cloud/spanner_dbapi/cursor.py @@ -229,7 +229,10 @@ def _do_execute_update_in_autocommit(self, transaction, sql, params): self.connection._transaction = transaction self.connection._snapshot = None self._result_set = transaction.execute_sql( - sql, params=params, param_types=get_param_types(params) + sql, + params=params, + param_types=get_param_types(params), + last_statement=True, ) self._itr = PeekIterator(self._result_set) self._row_count = None diff --git a/google/cloud/spanner_v1/__init__.py b/google/cloud/spanner_v1/__init__.py index d2e7a23938..beeed1dacf 100644 --- a/google/cloud/spanner_v1/__init__.py +++ b/google/cloud/spanner_v1/__init__.py @@ -64,7 +64,7 @@ from .types.type import TypeAnnotationCode from .types.type import TypeCode from .data_types import JsonObject -from .transaction import BatchTransactionId +from .transaction import BatchTransactionId, DefaultTransactionOptions from google.cloud.spanner_v1 import param_types from google.cloud.spanner_v1.client import Client @@ -149,4 +149,5 @@ "SpannerClient", "SpannerAsyncClient", "BatchTransactionId", + "DefaultTransactionOptions", ) diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py index 27e53200ed..d1f64db2d8 100644 --- a/google/cloud/spanner_v1/_helpers.py +++ b/google/cloud/spanner_v1/_helpers.py @@ -32,9 +32,18 @@ from google.cloud.spanner_v1 import TypeCode from google.cloud.spanner_v1 import ExecuteSqlRequest from google.cloud.spanner_v1 import JsonObject +from google.cloud.spanner_v1 import TransactionOptions from google.cloud.spanner_v1.request_id_header import with_request_id from google.rpc.error_details_pb2 import RetryInfo +try: + from opentelemetry.propagate import inject + from opentelemetry.propagators.textmap import Setter + + HAS_OPENTELEMETRY_INSTALLED = True +except ImportError: + HAS_OPENTELEMETRY_INSTALLED = False +from typing import List, Tuple import random # Validation error 
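On the DB-API side, autocommit batches are now executed through `_do_batch_update_autocommit`, which passes `last_statement=True` so the backend knows no further statements follow in the transaction. Nothing changes for callers; a typical autocommit batch still looks like this (connection parameters are placeholders):

```python
from google.cloud.spanner_dbapi import connect

connection = connect("my-instance", "my-database")
connection.autocommit = True

cursor = connection.cursor()
# executemany() batches the DML; in autocommit mode it runs inside
# run_in_transaction with last_statement=True under the hood.
cursor.executemany(
    "INSERT INTO Singers (SingerId, FirstName) VALUES (%s, %s)",
    [(1, "Marc"), (2, "Catalina")],
)
print(cursor.rowcount)
connection.close()
```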
messages @@ -47,6 +56,29 @@ ) + +if HAS_OPENTELEMETRY_INSTALLED: + + class OpenTelemetryContextSetter(Setter): + """ + Used by Open Telemetry for context propagation. + """ + + def set(self, carrier: List[Tuple[str, str]], key: str, value: str) -> None: + """ + Injects trace context into Spanner request metadata. + + Args: + carrier(List[Tuple[str, str]]): The request metadata list which is the carrier of Open Telemetry + data. + key(str): The key for which the Open Telemetry context data needs to be set. + value(str): The Open Telemetry context value to be set. + + Returns: + None + """ + carrier.append((key, value)) + + def _try_to_coerce_bytes(bytestring): """Try to coerce a byte string into the right thing based on Python version and whether or not it is base64 encoded. @@ -550,6 +582,21 @@ def _metadata_with_leader_aware_routing(value, **kw): return ("x-goog-spanner-route-to-leader", str(value).lower()) +def _metadata_with_span_context(metadata: List[Tuple[str, str]], **kw) -> None: + """ + Appends the end-to-end tracing header and the OpenTelemetry span context to the request metadata. + + Args: + metadata (list[tuple[str, str]]): The metadata carrier where the OpenTelemetry context + should be injected. + Returns: + None + """ + if HAS_OPENTELEMETRY_INSTALLED: + metadata.append(("x-goog-spanner-end-to-end-tracing", "true")) + inject(setter=OpenTelemetryContextSetter(), carrier=metadata) + + def _delay_until_retry(exc, deadline, attempts): """Helper for :meth:`Session.run_in_transaction`. @@ -644,3 +691,38 @@ def __radd__(self, n): def _metadata_with_request_id(*args, **kwargs): return with_request_id(*args, **kwargs) + + +def _merge_Transaction_Options( + defaultTransactionOptions: TransactionOptions, + mergeTransactionOptions: TransactionOptions, +) -> TransactionOptions: + """Merges two TransactionOptions objects. + + - Values from `mergeTransactionOptions` take precedence if set. + - Values from `defaultTransactionOptions` are used only if missing. + + Args: + defaultTransactionOptions (TransactionOptions): The default transaction options (fallback values). + mergeTransactionOptions (TransactionOptions): The main transaction options (overrides when set). + + Returns: + TransactionOptions: A merged TransactionOptions object.
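As the function body just below shows, the merge leans on protobuf `MergeFrom` semantics: fields set on the message merged second overwrite those merged first, while unset fields fall through. A standalone sketch of the precedence this yields; the `REPEATABLE_READ` value is my assumption about the `IsolationLevel` enum's members:

```python
from google.cloud.spanner_v1 import TransactionOptions

default = TransactionOptions(
    isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ,
)
override = TransactionOptions(
    read_write=TransactionOptions.ReadWrite(),
)

merged_pb = TransactionOptions()._pb
merged_pb.MergeFrom(default._pb)   # fallback values first
merged_pb.MergeFrom(override._pb)  # explicitly set values win

result = TransactionOptions(merged_pb)
# read_write comes from `override`; isolation_level survives from `default`
# because `override` never set it.
assert result.isolation_level == TransactionOptions.IsolationLevel.REPEATABLE_READ
```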
+ """ + + if defaultTransactionOptions is None: + return mergeTransactionOptions + + if mergeTransactionOptions is None: + return defaultTransactionOptions + + merged_pb = TransactionOptions()._pb # Create a new protobuf object + + # Merge defaultTransactionOptions first + merged_pb.MergeFrom(defaultTransactionOptions._pb) + + # Merge transactionOptions, ensuring it overrides default values + merged_pb.MergeFrom(mergeTransactionOptions._pb) + + # Convert protobuf object back into a TransactionOptions instance + return TransactionOptions(merged_pb) diff --git a/google/cloud/spanner_v1/_opentelemetry_tracing.py b/google/cloud/spanner_v1/_opentelemetry_tracing.py index 5ce23cab74..eafc983850 100644 --- a/google/cloud/spanner_v1/_opentelemetry_tracing.py +++ b/google/cloud/spanner_v1/_opentelemetry_tracing.py @@ -20,6 +20,9 @@ from google.cloud.spanner_v1 import SpannerClient from google.cloud.spanner_v1 import gapic_version +from google.cloud.spanner_v1._helpers import ( + _metadata_with_span_context, +) try: from opentelemetry import trace @@ -33,11 +36,16 @@ except ImportError: HAS_OPENTELEMETRY_INSTALLED = False +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture + TRACER_NAME = "cloud.google.com/python/spanner" TRACER_VERSION = gapic_version.__version__ extended_tracing_globally_disabled = ( os.getenv("SPANNER_ENABLE_EXTENDED_TRACING", "").lower() == "false" ) +end_to_end_tracing_globally_enabled = ( + os.getenv("SPANNER_ENABLE_END_TO_END_TRACING", "").lower() == "true" +) def get_tracer(tracer_provider=None): @@ -56,7 +64,9 @@ def get_tracer(tracer_provider=None): @contextmanager -def trace_call(name, session=None, extra_attributes=None, observability_options=None): +def trace_call( + name, session=None, extra_attributes=None, observability_options=None, metadata=None +): if session: session._last_use_time = datetime.now() @@ -72,6 +82,8 @@ def trace_call(name, session=None, extra_attributes=None, observability_options= # on by default. enable_extended_tracing = True + enable_end_to_end_tracing = False + db_name = "" if session and getattr(session, "_database", None): db_name = session._database.name @@ -81,6 +93,9 @@ def trace_call(name, session=None, extra_attributes=None, observability_options= enable_extended_tracing = observability_options.get( "enable_extended_tracing", enable_extended_tracing ) + enable_end_to_end_tracing = observability_options.get( + "enable_end_to_end_tracing", enable_end_to_end_tracing + ) db_name = observability_options.get("db_name", db_name) tracer = get_tracer(tracer_provider) @@ -108,29 +123,35 @@ def trace_call(name, session=None, extra_attributes=None, observability_options= if not enable_extended_tracing: attributes.pop("db.statement", False) + if end_to_end_tracing_globally_enabled: + enable_end_to_end_tracing = True + with tracer.start_as_current_span( name, kind=trace.SpanKind.CLIENT, attributes=attributes ) as span: - try: - yield span - except Exception as error: - span.set_status(Status(StatusCode.ERROR, str(error))) - # OpenTelemetry-Python imposes invoking span.record_exception on __exit__ - # on any exception. We should file a bug later on with them to only - # invoke .record_exception if not already invoked, hence we should not - # invoke .record_exception on our own else we shall have 2 exceptions. - raise - else: - # All spans still have set_status available even if for example - # NonRecordingSpan doesn't have "_status". 
- absent_span_status = getattr(span, "_status", None) is None - if absent_span_status or span._status.status_code == StatusCode.UNSET: - # OpenTelemetry-Python only allows a status change - # if the current code is UNSET or ERROR. At the end - # of the generator's consumption, only set it to OK - # it wasn't previously set otherwise. - # https://github.com/googleapis/python-spanner/issues/1246 - span.set_status(Status(StatusCode.OK)) + with MetricsCapture(): + try: + if enable_end_to_end_tracing: + _metadata_with_span_context(metadata) + yield span + except Exception as error: + span.set_status(Status(StatusCode.ERROR, str(error))) + # OpenTelemetry-Python imposes invoking span.record_exception on __exit__ + # on any exception. We should file a bug later on with them to only + # invoke .record_exception if not already invoked, hence we should not + # invoke .record_exception on our own else we shall have 2 exceptions. + raise + else: + # All spans still have set_status available even if for example + # NonRecordingSpan doesn't have "_status". + absent_span_status = getattr(span, "_status", None) is None + if absent_span_status or span._status.status_code == StatusCode.UNSET: + # OpenTelemetry-Python only allows a status change + # if the current code is UNSET or ERROR. At the end + # of the generator's consumption, only set it to OK + # it wasn't previously set otherwise. + # https://github.com/googleapis/python-spanner/issues/1246 + span.set_status(Status(StatusCode.OK)) def get_current_span(): diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py index 6a9f1f48f5..39e29d4d41 100644 --- a/google/cloud/spanner_v1/batch.py +++ b/google/cloud/spanner_v1/batch.py @@ -25,6 +25,7 @@ from google.cloud.spanner_v1._helpers import ( _metadata_with_prefix, _metadata_with_leader_aware_routing, + _merge_Transaction_Options, ) from google.cloud.spanner_v1._opentelemetry_tracing import trace_call from google.cloud.spanner_v1 import RequestOptions @@ -32,6 +33,7 @@ from google.cloud.spanner_v1._helpers import _retry_on_aborted_exception from google.cloud.spanner_v1._helpers import _check_rst_stream_error from google.api_core.exceptions import InternalServerError +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture import time DEFAULT_RETRY_TIMEOUT_SECS = 30 @@ -166,6 +168,7 @@ def commit( request_options=None, max_commit_delay=None, exclude_txn_from_change_streams=False, + isolation_level=TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, **kwargs, ): """Commit mutations to the database. @@ -186,6 +189,18 @@ def commit( (Optional) The amount of latency this request is willing to incur in order to improve throughput. + :type exclude_txn_from_change_streams: bool + :param exclude_txn_from_change_streams: + (Optional) If true, instructs the transaction to be excluded from being recorded in change streams + with the DDL option `allow_txn_exclusion=true`. This does not exclude the transaction from + being recorded in the change streams with the DDL option `allow_txn_exclusion` being false or + unset. + + :type isolation_level: + :class:`google.cloud.spanner_v1.types.TransactionOptions.IsolationLevel` + :param isolation_level: + (Optional) Sets isolation level for the transaction. + :rtype: datetime :returns: timestamp of the committed changes. 
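`Batch.commit` now accepts an `isolation_level` that is merged with the client-wide default before the `CommitRequest` is built. A sketch against an existing database handle; session handling is pared down for brevity, and `REPEATABLE_READ` is again an assumed enum member:

```python
from google.cloud import spanner
from google.cloud.spanner_v1 import TransactionOptions
from google.cloud.spanner_v1.batch import Batch

client = spanner.Client(project="my-project")
database = client.instance("my-instance").database("my-database")

session = database.session()
session.create()
try:
    batch = Batch(session)
    batch.insert(
        table="Singers",
        columns=("SingerId", "FirstName"),
        values=[(1, "Marc")],
    )
    commit_ts = batch.commit(
        isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ,
    )
    print("committed at", commit_ts)
finally:
    session.delete()
```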
""" @@ -200,6 +215,12 @@ def commit( txn_options = TransactionOptions( read_write=TransactionOptions.ReadWrite(), exclude_txn_from_change_streams=exclude_txn_from_change_streams, + isolation_level=isolation_level, + ) + + txn_options = _merge_Transaction_Options( + database.default_transaction_options.default_read_write_transaction_options, + txn_options, ) trace_attributes = {"num_mutations": len(self._mutations)} @@ -226,7 +247,8 @@ def commit( self._session, trace_attributes, observability_options=observability_options, - ): + metadata=metadata, + ), MetricsCapture(): method = functools.partial( api.commit, request=request, @@ -348,7 +370,8 @@ def batch_write(self, request_options=None, exclude_txn_from_change_streams=Fals self._session, trace_attributes, observability_options=observability_options, - ): + metadata=metadata, + ), MetricsCapture(): method = functools.partial( api.batch_write, request=request, diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index afe6264717..e201f93e9b 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -31,6 +31,7 @@ from google.auth.credentials import AnonymousCredentials import google.api_core.client_options from google.cloud.client import ClientWithProject +from typing import Optional from google.cloud.spanner_admin_database_v1 import DatabaseAdminClient @@ -45,12 +46,34 @@ from google.cloud.spanner_admin_instance_v1 import ListInstancesRequest from google.cloud.spanner_v1 import __version__ from google.cloud.spanner_v1 import ExecuteSqlRequest +from google.cloud.spanner_v1 import DefaultTransactionOptions from google.cloud.spanner_v1._helpers import _merge_query_options from google.cloud.spanner_v1._helpers import _metadata_with_prefix from google.cloud.spanner_v1.instance import Instance +from google.cloud.spanner_v1.metrics.constants import ( + ENABLE_SPANNER_METRICS_ENV_VAR, + METRIC_EXPORT_INTERVAL_MS, +) +from google.cloud.spanner_v1.metrics.spanner_metrics_tracer_factory import ( + SpannerMetricsTracerFactory, +) +from google.cloud.spanner_v1.metrics.metrics_exporter import ( + CloudMonitoringMetricsExporter, +) + +try: + from opentelemetry import metrics + from opentelemetry.sdk.metrics import MeterProvider + from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader + + HAS_GOOGLE_CLOUD_MONITORING_INSTALLED = True +except ImportError: # pragma: NO COVER + HAS_GOOGLE_CLOUD_MONITORING_INSTALLED = False + _CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) EMULATOR_ENV_VAR = "SPANNER_EMULATOR_HOST" +ENABLE_BUILTIN_METRICS_ENV_VAR = "SPANNER_ENABLE_BUILTIN_METRICS" _EMULATOR_HOST_HTTP_SCHEME = ( "%s contains a http scheme. When used with a scheme it may cause gRPC's " "DNS resolver to endlessly attempt to resolve. %s is intended to be used " @@ -73,6 +96,10 @@ def _get_spanner_optimizer_statistics_package(): return os.getenv(OPTIMIZER_STATISITCS_PACKAGE_ENV_VAR, "") +def _get_spanner_enable_builtin_metrics(): + return os.getenv(ENABLE_SPANNER_METRICS_ENV_VAR) == "true" + + class Client(ClientWithProject): """Client for interacting with Cloud Spanner API. @@ -135,6 +162,14 @@ class Client(ClientWithProject): Default `True`, please set it to `False` to turn it off or you can use the environment variable `SPANNER_ENABLE_EXTENDED_TRACING=` to control it. + enable_end_to_end_tracing: :type:boolean when set to true will allow for spans from Spanner server side. 
+ Default `False`, please set it to `True` to turn it on + or you can use the environment variable `SPANNER_ENABLE_END_TO_END_TRACING=` + to control it. + + :type default_transaction_options: :class:`~google.cloud.spanner_v1.DefaultTransactionOptions` + or :class:`dict` + :param default_transaction_options: (Optional) Default options to use for all transactions. :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` @@ -157,6 +192,7 @@ def __init__( route_to_leader_enabled=True, directed_read_options=None, observability_options=None, + default_transaction_options: Optional[DefaultTransactionOptions] = None, ): self._emulator_host = _get_spanner_emulator_host() @@ -195,10 +231,36 @@ def __init__( "http://" in self._emulator_host or "https://" in self._emulator_host ): warnings.warn(_EMULATOR_HOST_HTTP_SCHEME) + # Check flag to enable Spanner builtin metrics + if ( + _get_spanner_enable_builtin_metrics() + and HAS_GOOGLE_CLOUD_MONITORING_INSTALLED + ): + meter_provider = metrics.NoOpMeterProvider() + if not _get_spanner_emulator_host(): + meter_provider = MeterProvider( + metric_readers=[ + PeriodicExportingMetricReader( + CloudMonitoringMetricsExporter(), + export_interval_millis=METRIC_EXPORT_INTERVAL_MS, + ) + ] + ) + metrics.set_meter_provider(meter_provider) + SpannerMetricsTracerFactory() + else: + SpannerMetricsTracerFactory(enabled=False) self._route_to_leader_enabled = route_to_leader_enabled self._directed_read_options = directed_read_options self._observability_options = observability_options + if default_transaction_options is None: + default_transaction_options = DefaultTransactionOptions() + elif not isinstance(default_transaction_options, DefaultTransactionOptions): + raise TypeError( + "default_transaction_options must be an instance of DefaultTransactionOptions" + ) + self._default_transaction_options = default_transaction_options @property def credentials(self): @@ -289,6 +351,17 @@ def observability_options(self): """ return self._observability_options + @property + def default_transaction_options(self): + """Getter for default_transaction_options. + + :rtype: + :class:`~google.cloud.spanner_v1.DefaultTransactionOptions` + or :class:`dict` + :returns: The default transaction options that are used by this client for all transactions. + """ + return self._default_transaction_options + @property def directed_read_options(self): """Getter for directed_read_options. @@ -434,3 +507,21 @@ def directed_read_options(self, directed_read_options): or regions should be used for non-transactional reads or queries. """ self._directed_read_options = directed_read_options + + @default_transaction_options.setter + def default_transaction_options( + self, default_transaction_options: DefaultTransactionOptions + ): + """Sets default_transaction_options for the client + :type default_transaction_options: :class:`~google.cloud.spanner_v1.DefaultTransactionOptions` + or :class:`dict` + :param default_transaction_options: Default options to use for transactions. 
+ """ + if default_transaction_options is None: + default_transaction_options = DefaultTransactionOptions() + elif not isinstance(default_transaction_options, DefaultTransactionOptions): + raise TypeError( + "default_transaction_options must be an instance of DefaultTransactionOptions" + ) + + self._default_transaction_options = default_transaction_options diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 963debdab8..03c6e5119f 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -46,6 +46,7 @@ from google.cloud.spanner_v1 import TypeCode from google.cloud.spanner_v1 import TransactionSelector from google.cloud.spanner_v1 import TransactionOptions +from google.cloud.spanner_v1 import DefaultTransactionOptions from google.cloud.spanner_v1 import RequestOptions from google.cloud.spanner_v1 import SpannerClient from google.cloud.spanner_v1._helpers import _merge_query_options @@ -72,6 +73,7 @@ get_current_span, trace_call, ) +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture SPANNER_DATA_SCOPE = "https://www.googleapis.com/auth/spanner.data" @@ -182,6 +184,9 @@ def __init__( self._enable_drop_protection = enable_drop_protection self._reconciling = False self._directed_read_options = self._instance._client.directed_read_options + self.default_transaction_options: DefaultTransactionOptions = ( + self._instance._client.default_transaction_options + ) self._proto_descriptors = proto_descriptors if pool is None: @@ -702,7 +707,7 @@ def execute_pdml(): with trace_call( "CloudSpanner.Database.execute_partitioned_pdml", observability_options=self.observability_options, - ) as span: + ) as span, MetricsCapture(): with SessionCheckout(self._pool) as session: add_span_event(span, "Starting BeginTransaction") txn = api.begin_transaction( @@ -728,6 +733,7 @@ def execute_pdml(): method=method, trace_name="CloudSpanner.ExecuteStreamingSql", request=request, + metadata=metadata, transaction_selector=txn_selector, observability_options=self.observability_options, ) @@ -780,6 +786,7 @@ def batch( request_options=None, max_commit_delay=None, exclude_txn_from_change_streams=False, + isolation_level=TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, **kw, ): """Return an object which wraps a batch. @@ -807,14 +814,21 @@ def batch( being recorded in the change streams with the DDL option `allow_txn_exclusion` being false or unset. + :type isolation_level: + :class:`google.cloud.spanner_v1.types.TransactionOptions.IsolationLevel` + :param isolation_level: + (Optional) Sets the isolation level for this transaction. This overrides any default isolation level set for the client. + :rtype: :class:`~google.cloud.spanner_v1.database.BatchCheckout` :returns: new wrapper """ + return BatchCheckout( self, request_options, max_commit_delay, exclude_txn_from_change_streams, + isolation_level, **kw, ) @@ -886,6 +900,7 @@ def run_in_transaction(self, func, *args, **kw): from being recorded in change streams with the DDL option `allow_txn_exclusion=true`. This does not exclude the transaction from being recorded in the change streams with the DDL option `allow_txn_exclusion` being false or unset. + "isolation_level" sets the isolation level for the transaction. :rtype: Any :returns: The return value of ``func``. 
@@ -897,7 +912,7 @@ def run_in_transaction(self, func, *args, **kw): with trace_call( "CloudSpanner.Database.run_in_transaction", observability_options=observability_options, - ): + ), MetricsCapture(): # Sanity check: Is there a transaction already running? # If there is, then raise a red flag. Otherwise, mark that this one # is running. @@ -1176,6 +1191,7 @@ def __init__( request_options=None, max_commit_delay=None, exclude_txn_from_change_streams=False, + isolation_level=TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, **kw, ): self._database = database @@ -1188,6 +1204,7 @@ def __init__( self._request_options = request_options self._max_commit_delay = max_commit_delay self._exclude_txn_from_change_streams = exclude_txn_from_change_streams + self._isolation_level = isolation_level self._kw = kw def __enter__(self): @@ -1209,6 +1226,7 @@ def __exit__(self, exc_type, exc_val, exc_tb): request_options=self._request_options, max_commit_delay=self._max_commit_delay, exclude_txn_from_change_streams=self._exclude_txn_from_change_streams, + isolation_level=self._isolation_level, **self._kw, ) finally: @@ -1489,7 +1507,7 @@ def generate_read_batches( f"CloudSpanner.{type(self).__name__}.generate_read_batches", extra_attributes=dict(table=table, columns=columns), observability_options=self.observability_options, - ): + ), MetricsCapture(): partitions = self._get_snapshot().partition_read( table=table, columns=columns, @@ -1540,7 +1558,7 @@ def process_read_batch( with trace_call( f"CloudSpanner.{type(self).__name__}.process_read_batch", observability_options=observability_options, - ): + ), MetricsCapture(): kwargs = copy.deepcopy(batch["read"]) keyset_dict = kwargs.pop("keyset") kwargs["keyset"] = KeySet._from_dict(keyset_dict) @@ -1625,7 +1643,7 @@ def generate_query_batches( f"CloudSpanner.{type(self).__name__}.generate_query_batches", extra_attributes=dict(sql=sql), observability_options=self.observability_options, - ): + ), MetricsCapture(): partitions = self._get_snapshot().partition_query( sql=sql, params=params, @@ -1681,7 +1699,7 @@ def process_query_batch( with trace_call( f"CloudSpanner.{type(self).__name__}.process_query_batch", observability_options=self.observability_options, - ): + ), MetricsCapture(): return self._get_snapshot().execute_sql( partition=batch["partition"], **batch["query"], @@ -1746,7 +1764,7 @@ def run_partitioned_query( f"CloudSpanner.${type(self).__name__}.run_partitioned_query", extra_attributes=dict(sql=sql), observability_options=self.observability_options, - ): + ), MetricsCapture(): partitions = list( self.generate_query_batches( sql, diff --git a/google/cloud/spanner_v1/gapic_version.py b/google/cloud/spanner_v1/gapic_version.py index 5ea820ffea..9b205942db 100644 --- a/google/cloud/spanner_v1/gapic_version.py +++ b/google/cloud/spanner_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "3.52.0" # {x-release-please-version} +__version__ = "3.53.0" # {x-release-please-version} diff --git a/google/cloud/spanner_v1/merged_result_set.py b/google/cloud/spanner_v1/merged_result_set.py index bfecad1e46..7af989d696 100644 --- a/google/cloud/spanner_v1/merged_result_set.py +++ b/google/cloud/spanner_v1/merged_result_set.py @@ -18,6 +18,7 @@ from threading import Lock, Event from google.cloud.spanner_v1._opentelemetry_tracing import trace_call +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture if TYPE_CHECKING: from google.cloud.spanner_v1.database import BatchSnapshot @@ -45,7 +46,7 @@ def run(self): with trace_call( "CloudSpanner.PartitionExecutor.run", observability_options=observability_options, - ): + ), MetricsCapture(): self.__run() def __run(self): diff --git a/google/cloud/spanner_v1/metrics/constants.py b/google/cloud/spanner_v1/metrics/constants.py index 5eca1fa83d..a47aecc9ed 100644 --- a/google/cloud/spanner_v1/metrics/constants.py +++ b/google/cloud/spanner_v1/metrics/constants.py @@ -1,4 +1,4 @@ -# Copyright 2025 Google LLC +# Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -15,6 +15,12 @@ BUILT_IN_METRICS_METER_NAME = "gax-python" NATIVE_METRICS_PREFIX = "spanner.googleapis.com/internal/client" SPANNER_RESOURCE_TYPE = "spanner_instance_client" +SPANNER_SERVICE_NAME = "spanner-python" +GOOGLE_CLOUD_RESOURCE_KEY = "google-cloud-resource-prefix" +GOOGLE_CLOUD_REGION_KEY = "cloud.region" +GOOGLE_CLOUD_REGION_GLOBAL = "global" +SPANNER_METHOD_PREFIX = "/google.spanner.v1." +ENABLE_SPANNER_METRICS_ENV_VAR = "SPANNER_ENABLE_BUILTIN_METRICS" # Monitored resource labels MONITORED_RES_LABEL_KEY_PROJECT = "project_id" @@ -61,3 +67,5 @@ METRIC_NAME_OPERATION_COUNT, METRIC_NAME_ATTEMPT_COUNT, ] + +METRIC_EXPORT_INTERVAL_MS = 60000 # 1 Minute diff --git a/google/cloud/spanner_v1/metrics/metrics_capture.py b/google/cloud/spanner_v1/metrics/metrics_capture.py new file mode 100644 index 0000000000..6197ae5257 --- /dev/null +++ b/google/cloud/spanner_v1/metrics/metrics_capture.py @@ -0,0 +1,75 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module provides functionality for capturing metrics in Cloud Spanner operations. + +It includes a context manager class, MetricsCapture, which automatically handles the +start and completion of metrics tracing for a given operation. This ensures that metrics +are consistently recorded for Cloud Spanner operations, facilitating observability and +performance monitoring. +""" + +from .spanner_metrics_tracer_factory import SpannerMetricsTracerFactory + + +class MetricsCapture: + """Context manager for capturing metrics in Cloud Spanner operations. + + This class provides a context manager interface to automatically handle + the start and completion of metrics tracing for a given operation. 
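Usage follows the standard context-manager pattern; a minimal sketch of how the surrounding client code wraps one logical operation (illustrative, not part of the diff; `do_spanner_rpcs` is a hypothetical stand-in for the real RPC calls):

    from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture

    with MetricsCapture():
        # record_operation_start() has already run on entry;
        # record_operation_completion() runs on exit, even if an
        # exception propagates out of the block.
        do_spanner_rpcs()  # hypothetical helper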
+ """ + + def __enter__(self): + """Enter the runtime context related to this object. + + This method initializes a new metrics tracer for the operation and + records the start of the operation. + + Returns: + MetricsCapture: The instance of the context manager. + """ + # Short circuit out if metrics are disabled + factory = SpannerMetricsTracerFactory() + if not factory.enabled: + return self + + # Define a new metrics tracer for the new operation + SpannerMetricsTracerFactory.current_metrics_tracer = ( + factory.create_metrics_tracer() + ) + if SpannerMetricsTracerFactory.current_metrics_tracer: + SpannerMetricsTracerFactory.current_metrics_tracer.record_operation_start() + return self + + def __exit__(self, exc_type, exc_value, traceback): + """Exit the runtime context related to this object. + + This method records the completion of the operation. If an exception + occurred, it will be propagated after the metrics are recorded. + + Args: + exc_type (Type[BaseException]): The exception type. + exc_value (BaseException): The exception value. + traceback (TracebackType): The traceback object. + + Returns: + bool: False to propagate the exception if any occurred. + """ + # Short circuit out if metrics are disable + if not SpannerMetricsTracerFactory().enabled: + return False + + if SpannerMetricsTracerFactory.current_metrics_tracer: + SpannerMetricsTracerFactory.current_metrics_tracer.record_operation_completion() + return False # Propagate the exception if any diff --git a/google/cloud/spanner_v1/metrics/metrics_exporter.py b/google/cloud/spanner_v1/metrics/metrics_exporter.py index fb32985365..e10cf6a2f1 100644 --- a/google/cloud/spanner_v1/metrics/metrics_exporter.py +++ b/google/cloud/spanner_v1/metrics/metrics_exporter.py @@ -23,7 +23,7 @@ ) import logging -from typing import Optional, List, Union, NoReturn, Tuple +from typing import Optional, List, Union, NoReturn, Tuple, Dict import google.auth from google.api.distribution_pb2 import ( # pylint: disable=no-name-in-module @@ -39,10 +39,6 @@ MonitoredResource, ) -from google.cloud.monitoring_v3.services.metric_service.transports.grpc import ( - MetricServiceGrpcTransport, -) - # pylint: disable=no-name-in-module from google.protobuf.timestamp_pb2 import Timestamp from google.cloud.spanner_v1.gapic_version import __version__ @@ -60,12 +56,9 @@ Sum, ) from opentelemetry.sdk.resources import Resource - - HAS_OPENTELEMETRY_INSTALLED = True -except ImportError: # pragma: NO COVER - HAS_OPENTELEMETRY_INSTALLED = False - -try: + from google.cloud.monitoring_v3.services.metric_service.transports.grpc import ( + MetricServiceGrpcTransport, + ) from google.cloud.monitoring_v3 import ( CreateTimeSeriesRequest, MetricServiceClient, @@ -75,13 +68,10 @@ TypedValue, ) - HAS_GOOGLE_CLOUD_MONITORING_INSTALLED = True -except ImportError: - HAS_GOOGLE_CLOUD_MONITORING_INSTALLED = False - -HAS_DEPENDENCIES_INSTALLED = ( - HAS_OPENTELEMETRY_INSTALLED and HAS_GOOGLE_CLOUD_MONITORING_INSTALLED -) + HAS_OPENTELEMETRY_INSTALLED = True +except ImportError: # pragma: NO COVER + HAS_OPENTELEMETRY_INSTALLED = False + MetricExporter = object logger = logging.getLogger(__name__) MAX_BATCH_WRITE = 200 @@ -120,7 +110,7 @@ class CloudMonitoringMetricsExporter(MetricExporter): def __init__( self, project_id: Optional[str] = None, - client: Optional[MetricServiceClient] = None, + client: Optional["MetricServiceClient"] = None, ): """Initialize a custom exporter to send metrics for the Spanner Service Metrics.""" # Default preferred_temporality is all CUMULATIVE so need 
to customize @@ -144,7 +134,7 @@ def __init__( self.project_id = project_id self.project_name = self.client.common_project_path(self.project_id) - def _batch_write(self, series: List[TimeSeries], timeout_millis: float) -> None: + def _batch_write(self, series: List["TimeSeries"], timeout_millis: float) -> None: """Cloud Monitoring allows writing up to 200 time series at once. :param series: ProtoBuf TimeSeries @@ -166,8 +156,8 @@ def _batch_write(self, series: List[TimeSeries], timeout_millis: float) -> None: @staticmethod def _resource_to_monitored_resource_pb( - resource: Resource, labels: any - ) -> MonitoredResource: + resource: "Resource", labels: Dict[str, str] + ) -> "MonitoredResource": """ Convert the resource to a Google Cloud Monitoring monitored resource. @@ -182,7 +172,7 @@ def _resource_to_monitored_resource_pb( return monitored_resource @staticmethod - def _to_metric_kind(metric: Metric) -> MetricDescriptor.MetricKind: + def _to_metric_kind(metric: "Metric") -> MetricDescriptor.MetricKind: """ Convert the metric to a Google Cloud Monitoring metric kind. @@ -210,7 +200,7 @@ def _to_metric_kind(metric: Metric) -> MetricDescriptor.MetricKind: @staticmethod def _extract_metric_labels( - data_point: Union[NumberDataPoint, HistogramDataPoint] + data_point: Union["NumberDataPoint", "HistogramDataPoint"] ) -> Tuple[dict, dict]: """ Extract the metric labels from the data point. @@ -233,8 +223,8 @@ def _extract_metric_labels( @staticmethod def _to_point( kind: "MetricDescriptor.MetricKind.V", - data_point: Union[NumberDataPoint, HistogramDataPoint], - ) -> Point: + data_point: Union["NumberDataPoint", "HistogramDataPoint"], + ) -> "Point": # Create a Google Cloud Monitoring data point value based on the OpenTelemetry metric data point type ## For histograms, we need to calculate the mean and bucket counts if isinstance(data_point, HistogramDataPoint): @@ -281,7 +271,7 @@ def _data_point_to_timeseries_pb( metric, monitored_resource, labels, - ) -> TimeSeries: + ) -> "TimeSeries": """ Convert the data point to a Google Cloud Monitoring time series. @@ -308,8 +298,8 @@ def _data_point_to_timeseries_pb( @staticmethod def _resource_metrics_to_timeseries_pb( - metrics_data: MetricsData, - ) -> List[TimeSeries]: + metrics_data: "MetricsData", + ) -> List["TimeSeries"]: """ Convert the metrics data to a list of Google Cloud Monitoring time series. @@ -346,10 +336,10 @@ def _resource_metrics_to_timeseries_pb( def export( self, - metrics_data: MetricsData, + metrics_data: "MetricsData", timeout_millis: float = 10_000, **kwargs, - ) -> MetricExportResult: + ) -> "MetricExportResult": """ Export the metrics data to Google Cloud Monitoring. 
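For context, the exporter is wired into an OpenTelemetry MeterProvider the same way `client.py` does when `SPANNER_ENABLE_BUILTIN_METRICS=true`; a minimal sketch (not part of the diff; `my-project` is a placeholder):

    from opentelemetry.sdk.metrics import MeterProvider
    from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
    from google.cloud.spanner_v1.metrics.metrics_exporter import (
        CloudMonitoringMetricsExporter,
    )
    from google.cloud.spanner_v1.metrics.constants import METRIC_EXPORT_INTERVAL_MS

    # Push Spanner client metrics to Cloud Monitoring once per interval.
    reader = PeriodicExportingMetricReader(
        CloudMonitoringMetricsExporter(project_id="my-project"),
        export_interval_millis=METRIC_EXPORT_INTERVAL_MS,
    )
    provider = MeterProvider(metric_readers=[reader])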
@@ -357,10 +347,9 @@ def export(
         :param timeout_millis: timeout in milliseconds
         :return: MetricExportResult
         """
-        if not HAS_DEPENDENCIES_INSTALLED:
+        if not HAS_OPENTELEMETRY_INSTALLED:
             logger.warning("Metric exporter called without dependencies installed.")
             return False
-
         time_series_list = self._resource_metrics_to_timeseries_pb(metrics_data)
         self._batch_write(time_series_list, timeout_millis)
         return True
@@ -370,8 +359,8 @@ def force_flush(self, timeout_millis: float = 10_000) -> bool:
         return True
 
     def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
-        """Not implemented."""
-        pass
+        """Safely shuts down the exporter and closes all opened GRPC channels."""
+        self.client.transport.close()
 
 
 def _timestamp_from_nanos(nanos: int) -> Timestamp:
diff --git a/google/cloud/spanner_v1/metrics/metrics_interceptor.py b/google/cloud/spanner_v1/metrics/metrics_interceptor.py
new file mode 100644
index 0000000000..4b55056dab
--- /dev/null
+++ b/google/cloud/spanner_v1/metrics/metrics_interceptor.py
@@ -0,0 +1,156 @@
+# Copyright 2025 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Interceptor for collecting Cloud Spanner metrics."""
+
+from grpc_interceptor import ClientInterceptor
+from .constants import (
+    GOOGLE_CLOUD_RESOURCE_KEY,
+    SPANNER_METHOD_PREFIX,
+)
+
+from typing import Dict
+from .spanner_metrics_tracer_factory import SpannerMetricsTracerFactory
+import re
+
+
+class MetricsInterceptor(ClientInterceptor):
+    """Interceptor that collects metrics for Cloud Spanner operations."""
+
+    @staticmethod
+    def _parse_resource_path(path: str) -> dict:
+        """Parse the resource path to extract project, instance and database.
+
+        Args:
+            path (str): The resource path from the request
+
+        Returns:
+            dict: Extracted resource components
+        """
+        # Match paths like:
+        # projects/{project}/instances/{instance}/databases/{database}/sessions/{session}
+        # projects/{project}/instances/{instance}/databases/{database}
+        # projects/{project}/instances/{instance}
+        pattern = r"^projects/(?P<project>[^/]+)(/instances/(?P<instance>[^/]+))?(/databases/(?P<database>[^/]+))?(/sessions/(?P<session>[^/]+))?.*$"
+        match = re.match(pattern, path)
+        if match:
+            return {k: v for k, v in match.groupdict().items() if v is not None}
+        return {}
+
+    @staticmethod
+    def _extract_resource_from_path(metadata: Dict[str, str]) -> Dict[str, str]:
+        """
+        Extracts resource information from the metadata based on the path.
+
+        This method iterates through the metadata dictionary to find the first tuple containing the key 'google-cloud-resource-prefix'. It then extracts the path from this tuple and parses it to extract project, instance, and database information using the _parse_resource_path method.
+
+        Args:
+            metadata (Dict[str, str]): A dictionary containing metadata information.
+
+        Returns:
+            Dict[str, str]: A dictionary containing extracted project, instance, and database information.
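To illustrate the regex above, here is what `_parse_resource_path` yields for a fully qualified session path (a sketch, not part of the diff; the names are placeholders):

    path = (
        "projects/my-project/instances/my-instance"
        "/databases/my-database/sessions/s-123"
    )
    MetricsInterceptor._parse_resource_path(path)
    # -> {"project": "my-project", "instance": "my-instance",
    #     "database": "my-database", "session": "s-123"}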
+ """ + # Extract resource info from the first metadata tuple containing :path + path = next( + (value for key, value in metadata if key == GOOGLE_CLOUD_RESOURCE_KEY), "" + ) + + resources = MetricsInterceptor._parse_resource_path(path) + return resources + + @staticmethod + def _remove_prefix(s: str, prefix: str) -> str: + """ + This function removes the prefix from the given string. + + Args: + s (str): The string from which the prefix is to be removed. + prefix (str): The prefix to be removed from the string. + + Returns: + str: The string with the prefix removed. + + Note: + This function is used because the `removeprefix` method does not exist in Python 3.8. + """ + if s.startswith(prefix): + return s[len(prefix) :] + return s + + def _set_metrics_tracer_attributes(self, resources: Dict[str, str]) -> None: + """ + Sets the metric tracer attributes based on the provided resources. + + This method updates the current metric tracer's attributes with the project, instance, and database information extracted from the resources dictionary. If the current metric tracer is not set, the method does nothing. + + Args: + resources (Dict[str, str]): A dictionary containing project, instance, and database information. + """ + if SpannerMetricsTracerFactory.current_metrics_tracer is None: + return + + if resources: + if "project" in resources: + SpannerMetricsTracerFactory.current_metrics_tracer.set_project( + resources["project"] + ) + if "instance" in resources: + SpannerMetricsTracerFactory.current_metrics_tracer.set_instance( + resources["instance"] + ) + if "database" in resources: + SpannerMetricsTracerFactory.current_metrics_tracer.set_database( + resources["database"] + ) + + def intercept(self, invoked_method, request_or_iterator, call_details): + """Intercept gRPC calls to collect metrics. + + Args: + invoked_method: The RPC method + request_or_iterator: The RPC request + call_details: Details about the RPC call + + Returns: + The RPC response + """ + factory = SpannerMetricsTracerFactory() + if ( + SpannerMetricsTracerFactory.current_metrics_tracer is None + or not factory.enabled + ): + return invoked_method(request_or_iterator, call_details) + + # Setup Metric Tracer attributes from call details + ## Extract Project / Instance / Databse from header information + resources = self._extract_resource_from_path(call_details.metadata) + self._set_metrics_tracer_attributes(resources) + + ## Format method to be be spanner. 
+        method_name = self._remove_prefix(
+            call_details.method, SPANNER_METHOD_PREFIX
+        ).replace("/", ".")
+
+        SpannerMetricsTracerFactory.current_metrics_tracer.set_method(method_name)
+        SpannerMetricsTracerFactory.current_metrics_tracer.record_attempt_start()
+        response = invoked_method(request_or_iterator, call_details)
+        SpannerMetricsTracerFactory.current_metrics_tracer.record_attempt_completion()
+
+        # Process and send GFE metrics if enabled
+        if SpannerMetricsTracerFactory.current_metrics_tracer.gfe_enabled:
+            metadata = response.initial_metadata()
+            SpannerMetricsTracerFactory.current_metrics_tracer.record_gfe_metrics(
+                metadata
+            )
+        return response
diff --git a/google/cloud/spanner_v1/metrics/metrics_tracer.py b/google/cloud/spanner_v1/metrics/metrics_tracer.py
index 60525d6e4e..87035d9c22 100644
--- a/google/cloud/spanner_v1/metrics/metrics_tracer.py
+++ b/google/cloud/spanner_v1/metrics/metrics_tracer.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # Copyright 2025 Google LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -56,7 +55,7 @@ class MetricAttemptTracer:
     direct_path_used: bool
     status: str
 
-    def __init__(self):
+    def __init__(self) -> None:
         """
         Initialize a MetricAttemptTracer instance with default values.
 
@@ -177,37 +176,42 @@ class should not have any knowledge about the observability framework used for m
     """
 
     _client_attributes: Dict[str, str]
-    _instrument_attempt_counter: Counter
-    _instrument_attempt_latency: Histogram
-    _instrument_operation_counter: Counter
-    _instrument_operation_latency: Histogram
+    _instrument_attempt_counter: "Counter"
+    _instrument_attempt_latency: "Histogram"
+    _instrument_operation_counter: "Counter"
+    _instrument_operation_latency: "Histogram"
+    _instrument_gfe_latency: "Histogram"
+    _instrument_gfe_missing_header_count: "Counter"
     current_op: MetricOpTracer
     enabled: bool
+    gfe_enabled: bool
     method: str
 
     def __init__(
         self,
         enabled: bool,
-        instrument_attempt_latency: Histogram,
-        instrument_attempt_counter: Counter,
-        instrument_operation_latency: Histogram,
-        instrument_operation_counter: Counter,
+        instrument_attempt_latency: "Histogram",
+        instrument_attempt_counter: "Counter",
+        instrument_operation_latency: "Histogram",
+        instrument_operation_counter: "Counter",
         client_attributes: Dict[str, str],
+        gfe_enabled: bool = False,
     ):
         """
         Initialize a MetricsTracer instance with the given parameters.
 
-        This constructor initializes a MetricsTracer instance with the provided method name, enabled status, direct path enabled status,
-        instrumented metrics for attempt latency, attempt counter, operation latency, operation counter, and client attributes.
-        It sets up the necessary metrics tracing infrastructure for recording metrics related to RPC operations.
+        This constructor sets up a MetricsTracer instance with the specified parameters, including the enabled status,
+        instruments for measuring and counting attempt and operation metrics, and client attributes. It prepares the
+        infrastructure needed for recording metrics related to RPC operations.
 
         Args:
-            enabled (bool): A flag indicating if metrics tracing is enabled.
-            instrument_attempt_latency (Histogram): The instrument for measuring attempt latency.
-            instrument_attempt_counter (Counter): The instrument for counting attempts.
-            instrument_operation_latency (Histogram): The instrument for measuring operation latency.
-            instrument_operation_counter (Counter): The instrument for counting operations.
- client_attributes (dict[str, str]): A dictionary of client attributes used for metrics tracing. + enabled (bool): Indicates if metrics tracing is enabled. + instrument_attempt_latency (Histogram): Instrument for measuring attempt latency. + instrument_attempt_counter (Counter): Instrument for counting attempts. + instrument_operation_latency (Histogram): Instrument for measuring operation latency. + instrument_operation_counter (Counter): Instrument for counting operations. + client_attributes (Dict[str, str]): Dictionary of client attributes used for metrics tracing. + gfe_enabled (bool, optional): Indicates if GFE metrics are enabled. Defaults to False. """ self.current_op = MetricOpTracer() self._client_attributes = client_attributes @@ -216,6 +220,7 @@ def __init__( self._instrument_operation_latency = instrument_operation_latency self._instrument_operation_counter = instrument_operation_counter self.enabled = enabled + self.gfe_enabled = gfe_enabled @staticmethod def _get_ms_time_diff(start: datetime, end: datetime) -> float: @@ -251,7 +256,7 @@ def client_attributes(self) -> Dict[str, str]: return self._client_attributes @property - def instrument_attempt_counter(self) -> Counter: + def instrument_attempt_counter(self) -> "Counter": """ Return the instrument for counting attempts. @@ -264,7 +269,7 @@ def instrument_attempt_counter(self) -> Counter: return self._instrument_attempt_counter @property - def instrument_attempt_latency(self) -> Histogram: + def instrument_attempt_latency(self) -> "Histogram": """ Return the instrument for measuring attempt latency. @@ -277,7 +282,7 @@ def instrument_attempt_latency(self) -> Histogram: return self._instrument_attempt_latency @property - def instrument_operation_counter(self) -> Counter: + def instrument_operation_counter(self) -> "Counter": """ Return the instrument for counting operations. @@ -290,7 +295,7 @@ def instrument_operation_counter(self) -> Counter: return self._instrument_operation_counter @property - def instrument_operation_latency(self) -> Histogram: + def instrument_operation_latency(self) -> "Histogram": """ Return the instrument for measuring operation latency. @@ -322,7 +327,7 @@ def record_attempt_completion(self, status: str = StatusCode.OK.name) -> None: If metrics tracing is not enabled, this method does not perform any operations. """ - if not self.enabled: + if not self.enabled or not HAS_OPENTELEMETRY_INSTALLED: return self.current_op.current_attempt.status = status @@ -347,7 +352,7 @@ def record_operation_start(self) -> None: It is used to track the start time of an operation, which is essential for calculating operation latency and other metrics. If metrics tracing is not enabled, this method does not perform any operations. """ - if not self.enabled: + if not self.enabled or not HAS_OPENTELEMETRY_INSTALLED: return self.current_op.start() @@ -360,7 +365,7 @@ def record_operation_completion(self) -> None: Additionally, it increments the operation count and records the attempt count for the operation. If metrics tracing is not enabled, this method does not perform any operations. """ - if not self.enabled: + if not self.enabled or not HAS_OPENTELEMETRY_INSTALLED: return end_time = datetime.now() # Build Attributes @@ -385,6 +390,29 @@ def record_operation_completion(self) -> None: self.current_op.attempt_count, attributes=attempt_attributes ) + def record_gfe_latency(self, latency: int) -> None: + """ + Records the GFE latency using the Histogram instrument. 
+ + Args: + latency (int): The latency duration to be recorded. + """ + if not self.enabled or not HAS_OPENTELEMETRY_INSTALLED or not self.gfe_enabled: + return + self._instrument_gfe_latency.record( + amount=latency, attributes=self.client_attributes + ) + + def record_gfe_missing_header_count(self) -> None: + """ + Increments the counter for missing GFE headers. + """ + if not self.enabled or not HAS_OPENTELEMETRY_INSTALLED or not self.gfe_enabled: + return + self._instrument_gfe_missing_header_count.add( + amount=1, attributes=self.client_attributes + ) + def _create_operation_otel_attributes(self) -> dict: """ Create additional attributes for operation metrics tracing. @@ -392,11 +420,11 @@ def _create_operation_otel_attributes(self) -> dict: This method populates the client attributes dictionary with the operation status if metrics tracing is enabled. It returns the updated client attributes dictionary. """ - if not self.enabled: + if not self.enabled or not HAS_OPENTELEMETRY_INSTALLED: return {} - - self._client_attributes[METRIC_LABEL_KEY_STATUS] = self.current_op.status - return self._client_attributes + attributes = self._client_attributes.copy() + attributes[METRIC_LABEL_KEY_STATUS] = self.current_op.status + return attributes def _create_attempt_otel_attributes(self) -> dict: """ @@ -405,14 +433,16 @@ def _create_attempt_otel_attributes(self) -> dict: This method populates the attributes dictionary with the attempt status if metrics tracing is enabled and an attempt exists. It returns the updated attributes dictionary. """ - if not self.enabled: + if not self.enabled or not HAS_OPENTELEMETRY_INSTALLED: return {} - attributes = {} + attributes = self._client_attributes.copy() + # Short circuit out if we don't have an attempt - if self.current_op.current_attempt is not None: - attributes[METRIC_LABEL_KEY_STATUS] = self.current_op.current_attempt.status + if self.current_op.current_attempt is None: + return attributes + attributes[METRIC_LABEL_KEY_STATUS] = self.current_op.current_attempt.status return attributes def set_project(self, project: str) -> "MetricsTracer": diff --git a/google/cloud/spanner_v1/metrics/metrics_tracer_factory.py b/google/cloud/spanner_v1/metrics/metrics_tracer_factory.py index f7a4088019..ed4b270f06 100644 --- a/google/cloud/spanner_v1/metrics/metrics_tracer_factory.py +++ b/google/cloud/spanner_v1/metrics/metrics_tracer_factory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -32,6 +31,8 @@ METRIC_LABEL_KEY_DATABASE, METRIC_LABEL_KEY_DIRECT_PATH_ENABLED, BUILT_IN_METRICS_METER_NAME, + METRIC_NAME_GFE_LATENCY, + METRIC_NAME_GFE_MISSING_HEADER_COUNT, ) from typing import Dict @@ -50,26 +51,29 @@ class MetricsTracerFactory: """Factory class for creating MetricTracer instances. 
This class facilitates the creation of MetricTracer objects, which are responsible for collecting and tracing metrics.""" enabled: bool - _instrument_attempt_latency: Histogram - _instrument_attempt_counter: Counter - _instrument_operation_latency: Histogram - _instrument_operation_counter: Counter + gfe_enabled: bool + _instrument_attempt_latency: "Histogram" + _instrument_attempt_counter: "Counter" + _instrument_operation_latency: "Histogram" + _instrument_operation_counter: "Counter" + _instrument_gfe_latency: "Histogram" + _instrument_gfe_missing_header_count: "Counter" _client_attributes: Dict[str, str] @property - def instrument_attempt_latency(self) -> Histogram: + def instrument_attempt_latency(self) -> "Histogram": return self._instrument_attempt_latency @property - def instrument_attempt_counter(self) -> Counter: + def instrument_attempt_counter(self) -> "Counter": return self._instrument_attempt_counter @property - def instrument_operation_latency(self) -> Histogram: + def instrument_operation_latency(self) -> "Histogram": return self._instrument_operation_latency @property - def instrument_operation_counter(self) -> Counter: + def instrument_operation_counter(self) -> "Counter": return self._instrument_operation_counter def __init__(self, enabled: bool, service_name: str): @@ -255,6 +259,9 @@ def create_metrics_tracer(self) -> MetricsTracer: Returns: MetricsTracer: A MetricsTracer instance with default settings and client attributes. """ + if not HAS_OPENTELEMETRY_INSTALLED: + return None + metrics_tracer = MetricsTracer( enabled=self.enabled and HAS_OPENTELEMETRY_INSTALLED, instrument_attempt_latency=self._instrument_attempt_latency, @@ -307,3 +314,15 @@ def _create_metric_instruments(self, service_name: str) -> None: unit="1", description="Number of operations.", ) + + self._instrument_gfe_latency = meter.create_histogram( + name=METRIC_NAME_GFE_LATENCY, + unit="ms", + description="GFE Latency.", + ) + + self._instrument_gfe_missing_header_count = meter.create_counter( + name=METRIC_NAME_GFE_MISSING_HEADER_COUNT, + unit="1", + description="GFE missing header count.", + ) diff --git a/google/cloud/spanner_v1/metrics/spanner_metrics_tracer_factory.py b/google/cloud/spanner_v1/metrics/spanner_metrics_tracer_factory.py new file mode 100644 index 0000000000..fd00c4de9c --- /dev/null +++ b/google/cloud/spanner_v1/metrics/spanner_metrics_tracer_factory.py @@ -0,0 +1,172 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""This module provides a singleton factory for creating SpannerMetricsTracer instances.""" + +from .metrics_tracer_factory import MetricsTracerFactory +import os +from .constants import ( + SPANNER_SERVICE_NAME, + GOOGLE_CLOUD_REGION_KEY, + GOOGLE_CLOUD_REGION_GLOBAL, +) + +try: + from opentelemetry.resourcedetector import gcp_resource_detector + + # Overwrite the requests timeout for the detector. 
+    # This is necessary as the client will wait the full timeout if the
+    # code is not run in a GCP environment, with the location endpoints available.
+    gcp_resource_detector._TIMEOUT_SEC = 0.2
+
+    import mmh3
+
+    # Override Resource detector logging to not warn when GCP resources are not detected
+    import logging
+
+    logging.getLogger("opentelemetry.resourcedetector.gcp_resource_detector").setLevel(
+        logging.ERROR
+    )
+
+    HAS_OPENTELEMETRY_INSTALLED = True
+except ImportError:  # pragma: NO COVER
+    HAS_OPENTELEMETRY_INSTALLED = False
+
+from .metrics_tracer import MetricsTracer
+from google.cloud.spanner_v1 import __version__
+from uuid import uuid4
+
+
+class SpannerMetricsTracerFactory(MetricsTracerFactory):
+    """A factory for creating SpannerMetricsTracer instances."""
+
+    _metrics_tracer_factory: "SpannerMetricsTracerFactory" = None
+    current_metrics_tracer: MetricsTracer = None
+
+    def __new__(
+        cls, enabled: bool = True, gfe_enabled: bool = False
+    ) -> "SpannerMetricsTracerFactory":
+        """
+        Create a new instance of SpannerMetricsTracerFactory if it doesn't already exist.
+
+        This method implements the singleton pattern for the SpannerMetricsTracerFactory class.
+        It initializes the factory with the necessary client attributes and configuration settings
+        if it hasn't been created yet.
+
+        Args:
+            enabled (bool): A flag indicating whether metrics tracing is enabled. Defaults to True.
+            gfe_enabled (bool): A flag indicating whether GFE metrics are enabled. Defaults to False.
+
+        Returns:
+            SpannerMetricsTracerFactory: The singleton instance of SpannerMetricsTracerFactory.
+        """
+        if cls._metrics_tracer_factory is None:
+            cls._metrics_tracer_factory = MetricsTracerFactory(
+                enabled, SPANNER_SERVICE_NAME
+            )
+            if not HAS_OPENTELEMETRY_INSTALLED:
+                return cls._metrics_tracer_factory
+
+            client_uid = cls._generate_client_uid()
+            cls._metrics_tracer_factory.set_client_uid(client_uid)
+            cls._metrics_tracer_factory.set_instance_config(cls._get_instance_config())
+            cls._metrics_tracer_factory.set_client_name(cls._get_client_name())
+            cls._metrics_tracer_factory.set_client_hash(
+                cls._generate_client_hash(client_uid)
+            )
+            cls._metrics_tracer_factory.set_location(cls._get_location())
+            cls._metrics_tracer_factory.gfe_enabled = gfe_enabled
+
+        if cls._metrics_tracer_factory.enabled != enabled:
+            cls._metrics_tracer_factory.enabled = enabled
+
+        return cls._metrics_tracer_factory
+
+    @staticmethod
+    def _generate_client_uid() -> str:
+        """Generate a client UID in the form of uuidv4@pid@hostname.
+
+        This method generates a unique client identifier (UID) by combining a UUID version 4,
+        the process ID (PID), and the hostname. The PID is limited to the first 10 characters.
+
+        Returns:
+            str: A string representing the client UID in the format uuidv4@pid@hostname.
+        """
+        try:
+            hostname = os.uname()[1]
+            pid = str(os.getpid())[0:10]  # Limit PID to 10 characters
+            uuid = uuid4()
+            return f"{uuid}@{pid}@{hostname}"
+        except Exception:
+            return ""
+
+    @staticmethod
+    def _get_instance_config() -> str:
+        """Get the instance configuration."""
+        # TODO: unknown until there's a good way to get it.
+        return "unknown"
+
+    @staticmethod
+    def _get_client_name() -> str:
+        """Get the client name."""
+        return f"{SPANNER_SERVICE_NAME}/{__version__}"
+
+    @staticmethod
+    def _generate_client_hash(client_uid: str) -> str:
+        """
+        Generate a 6-digit zero-padded lowercase hexadecimal hash using the 10 most significant bits of a 64-bit hash value.
+
+        The primary purpose of this function is to generate a hash value for the `client_hash`
+        resource label using the `client_uid` metric field. The range of values is chosen to be small
+        enough to keep the cardinality of the Resource targets under control. Note: If at a later time
+        the range needs to be increased, it can be done by increasing the value of `k_prefix_length` to
+        up to 24 bits without changing the format of the returned value.
+
+        Args:
+            client_uid (str): The client UID used to generate the hash.
+
+        Returns:
+            str: A 6-digit zero-padded lowercase hexadecimal hash.
+        """
+        if not client_uid:
+            return "000000"
+        hashed_client = mmh3.hash64(client_uid)
+
+        # Join the hashes back together since mmh3 splits into high and low 32bits
+        full_hash = (hashed_client[0] << 32) | (hashed_client[1] & 0xFFFFFFFF)
+        unsigned_hash = full_hash & 0xFFFFFFFFFFFFFFFF
+
+        k_prefix_length = 10
+        sig_figs = unsigned_hash >> (64 - k_prefix_length)
+
+        # Return as 6 digit zero padded hex string
+        return f"{sig_figs:06x}"
+
+    @staticmethod
+    def _get_location() -> str:
+        """Get the location of the resource.
+
+        Returns:
+            str: The location of the resource. If OpenTelemetry is not installed, returns a global region.
+        """
+        if not HAS_OPENTELEMETRY_INSTALLED:
+            return GOOGLE_CLOUD_REGION_GLOBAL
+        detector = gcp_resource_detector.GoogleCloudResourceDetector()
+        resources = detector.detect()
+
+        if GOOGLE_CLOUD_REGION_KEY not in resources.attributes:
+            return GOOGLE_CLOUD_REGION_GLOBAL
+        else:
+            return resources.attributes[GOOGLE_CLOUD_REGION_KEY]
diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py
index 596f76a1f1..0c4dd5a63b 100644
--- a/google/cloud/spanner_v1/pool.py
+++ b/google/cloud/spanner_v1/pool.py
@@ -32,6 +32,8 @@
 )
 from warnings import warn
 
+from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture
+
 _NOW = datetime.datetime.utcnow  # unit tests may replace
 
 
@@ -242,7 +244,8 @@ def bind(self, database):
         with trace_call(
             "CloudSpanner.FixedPool.BatchCreateSessions",
             observability_options=observability_options,
-        ) as span:
+            metadata=metadata,
+        ) as span, MetricsCapture():
             returned_session_count = 0
             while not self._sessions.full():
                 request.session_count = requested_session_count - self._sessions.qsize()
@@ -552,7 +555,8 @@ def bind(self, database):
         with trace_call(
             "CloudSpanner.PingingPool.BatchCreateSessions",
             observability_options=observability_options,
-        ) as span:
+            metadata=metadata,
+        ) as span, MetricsCapture():
             returned_session_count = 0
             while returned_session_count < self.size:
                 resp = api.batch_create_sessions(
diff --git a/google/cloud/spanner_v1/services/spanner/async_client.py b/google/cloud/spanner_v1/services/spanner/async_client.py
index 992a74503c..a8bdb5ee4c 100644
--- a/google/cloud/spanner_v1/services/spanner/async_client.py
+++ b/google/cloud/spanner_v1/services/spanner/async_client.py
@@ -13,6 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
# +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -58,6 +59,15 @@ from .transports.grpc_asyncio import SpannerGrpcAsyncIOTransport from .client import SpannerClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class SpannerAsyncClient: """Cloud Spanner API @@ -261,6 +271,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.spanner_v1.SpannerAsyncClient`.", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.spanner.v1.Spanner", + "credentialsType": None, + }, + ) + async def create_session( self, request: Optional[Union[spanner.CreateSessionRequest, dict]] = None, @@ -268,7 +300,7 @@ async def create_session( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.Session: r"""Creates a new session. A session can be used to perform transactions that read and/or modify data in a Cloud Spanner @@ -330,8 +362,10 @@ async def sample_create_session(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.Session: @@ -391,7 +425,7 @@ async def batch_create_sessions( session_count: Optional[int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.BatchCreateSessionsResponse: r"""Creates multiple new sessions. @@ -452,8 +486,10 @@ async def sample_batch_create_sessions(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_v1.types.BatchCreateSessionsResponse: @@ -516,7 +552,7 @@ async def get_session( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.Session: r"""Gets a session. Returns ``NOT_FOUND`` if the session does not exist. This is mainly useful for determining whether a session @@ -562,8 +598,10 @@ async def sample_get_session(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.Session: @@ -622,7 +660,7 @@ async def list_sessions( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListSessionsAsyncPager: r"""Lists all sessions in a given database. @@ -667,8 +705,10 @@ async def sample_list_sessions(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.services.spanner.pagers.ListSessionsAsyncPager: @@ -743,7 +783,7 @@ async def delete_session( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Ends a session, releasing server resources associated with it. This will asynchronously trigger cancellation @@ -786,8 +826,10 @@ async def sample_delete_session(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. 
# - Quick check: If we got a request object, we should *not* have @@ -838,7 +880,7 @@ async def execute_sql( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> result_set.ResultSet: r"""Executes an SQL statement, returning all results in a single reply. This method cannot be used to return a result set larger @@ -890,8 +932,10 @@ async def sample_execute_sql(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.ResultSet: @@ -937,7 +981,7 @@ def execute_streaming_sql( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[result_set.PartialResultSet]]: r"""Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result set as a stream. Unlike @@ -982,8 +1026,10 @@ async def sample_execute_streaming_sql(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.spanner_v1.types.PartialResultSet]: @@ -1032,7 +1078,7 @@ async def execute_batch_dml( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.ExecuteBatchDmlResponse: r"""Executes a batch of SQL DML statements. This method allows many statements to be run with lower latency than submitting them @@ -1087,8 +1133,10 @@ async def sample_execute_batch_dml(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_v1.types.ExecuteBatchDmlResponse: @@ -1174,7 +1222,7 @@ async def read( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> result_set.ResultSet: r"""Reads rows from the database using key lookups and scans, as a simple key/value style alternative to @@ -1228,8 +1276,10 @@ async def sample_read(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.ResultSet: @@ -1273,7 +1323,7 @@ def streaming_read( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[result_set.PartialResultSet]]: r"""Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a stream. Unlike @@ -1319,8 +1369,10 @@ async def sample_streaming_read(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.spanner_v1.types.PartialResultSet]: @@ -1371,7 +1423,7 @@ async def begin_transaction( options: Optional[transaction.TransactionOptions] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> transaction.Transaction: r"""Begins a new transaction. This step can often be skipped: [Read][google.spanner.v1.Spanner.Read], @@ -1426,8 +1478,10 @@ async def sample_begin_transaction(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_v1.types.Transaction: @@ -1491,7 +1545,7 @@ async def commit( single_use_transaction: Optional[transaction.TransactionOptions] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> commit_response.CommitResponse: r"""Commits a transaction. The request includes the mutations to be applied to rows in the database. @@ -1582,8 +1636,10 @@ async def sample_commit(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.CommitResponse: @@ -1651,7 +1707,7 @@ async def rollback( transaction_id: Optional[bytes] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Rolls back a transaction, releasing any locks it holds. It is a good idea to call this for any transaction that includes one or @@ -1709,8 +1765,10 @@ async def sample_rollback(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1761,7 +1819,7 @@ async def partition_query( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.PartitionResponse: r"""Creates a set of partition tokens that can be used to execute a query operation in parallel. Each of the returned partition @@ -1812,8 +1870,10 @@ async def sample_partition_query(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_v1.types.PartitionResponse: @@ -1860,7 +1920,7 @@ async def partition_read( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.PartitionResponse: r"""Creates a set of partition tokens that can be used to execute a read operation in parallel. Each of the returned partition @@ -1914,8 +1974,10 @@ async def sample_partition_read(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.PartitionResponse: @@ -1966,7 +2028,7 @@ def batch_write( ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[spanner.BatchWriteResponse]]: r"""Batches the supplied mutation groups in a collection of efficient transactions. All mutations in a group are @@ -2040,8 +2102,10 @@ async def sample_batch_write(): retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.spanner_v1.types.BatchWriteResponse]: diff --git a/google/cloud/spanner_v1/services/spanner/client.py b/google/cloud/spanner_v1/services/spanner/client.py index 96b90bb21c..e0768ce742 100644 --- a/google/cloud/spanner_v1/services/spanner/client.py +++ b/google/cloud/spanner_v1/services/spanner/client.py @@ -14,6 +14,9 @@ # limitations under the License. 
# from collections import OrderedDict +from http import HTTPStatus +import json +import logging as std_logging import os import re from typing import ( @@ -49,6 +52,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.cloud.spanner_v1.services.spanner import pagers from google.cloud.spanner_v1.types import commit_response from google.cloud.spanner_v1.types import mutation @@ -62,6 +74,7 @@ from .transports.grpc import SpannerGrpcTransport from .transports.grpc_asyncio import SpannerGrpcAsyncIOTransport from .transports.rest import SpannerRestTransport +from google.cloud.spanner_v1.metrics.metrics_interceptor import MetricsInterceptor class SpannerClientMeta(type): @@ -494,52 +507,45 @@ def _get_universe_domain( raise ValueError("Universe Domain cannot be an empty string.") return universe_domain - @staticmethod - def _compare_universes( - client_universe: str, credentials: ga_credentials.Credentials - ) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. Returns: - bool: True iff client_universe matches the universe in credentials. + bool: True iff the configured universe domain is valid. Raises: - ValueError: when client_universe does not match the universe in credentials. + ValueError: If the configured universe domain is not valid. """ - default_universe = SpannerClient._DEFAULT_UNIVERSE - credentials_universe = getattr(credentials, "universe_domain", default_universe) - - if client_universe != credentials_universe: - raise ValueError( - "The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default." - ) + # NOTE (b/349488459): universe validation is disabled until further notice. return True - def _validate_universe_domain(self): - """Validates client's and credentials' universe domains are consistent. - - Returns: - bool: True iff the configured universe domain is valid. + def _add_cred_info_for_auth_errors( + self, error: core_exceptions.GoogleAPICallError + ) -> None: + """Adds credential info string to error details for 401/403/404 errors. - Raises: - ValueError: If the configured universe domain is not valid. + Args: + error (google.api_core.exceptions.GoogleAPICallError): The error to add the cred info. 
""" - self._is_universe_domain_valid = ( - self._is_universe_domain_valid - or SpannerClient._compare_universes( - self.universe_domain, self.transport._credentials - ) - ) - return self._is_universe_domain_valid + if error.code not in [ + HTTPStatus.UNAUTHORIZED, + HTTPStatus.FORBIDDEN, + HTTPStatus.NOT_FOUND, + ]: + return + + cred = self._transport._credentials + + # get_cred_info is only available in google-auth>=2.35.0 + if not hasattr(cred, "get_cred_info"): + return + + # ignore the type check since pypy test fails when get_cred_info + # is not available + cred_info = cred.get_cred_info() # type: ignore + if cred_info and hasattr(error._details, "append"): + error._details.append(json.dumps(cred_info)) @property def api_endpoint(self): @@ -645,6 +651,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -705,8 +715,32 @@ def __init__( client_info=client_info, always_use_jwt_access=True, api_audience=self._client_options.api_audience, + metrics_interceptor=MetricsInterceptor(), ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.spanner_v1.SpannerClient`.", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.spanner.v1.Spanner", + "credentialsType": None, + }, + ) + def create_session( self, request: Optional[Union[spanner.CreateSessionRequest, dict]] = None, @@ -714,7 +748,7 @@ def create_session( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.Session: r"""Creates a new session. A session can be used to perform transactions that read and/or modify data in a Cloud Spanner @@ -776,8 +810,10 @@ def sample_create_session(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.Session: @@ -834,7 +870,7 @@ def batch_create_sessions( session_count: Optional[int] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.BatchCreateSessionsResponse: r"""Creates multiple new sessions. 
@@ -895,8 +931,10 @@ def sample_batch_create_sessions(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.BatchCreateSessionsResponse: @@ -956,7 +994,7 @@ def get_session( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.Session: r"""Gets a session. Returns ``NOT_FOUND`` if the session does not exist. This is mainly useful for determining whether a session @@ -1002,8 +1040,10 @@ def sample_get_session(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.Session: @@ -1059,7 +1099,7 @@ def list_sessions( database: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListSessionsPager: r"""Lists all sessions in a given database. @@ -1104,8 +1144,10 @@ def sample_list_sessions(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.services.spanner.pagers.ListSessionsPager: @@ -1177,7 +1219,7 @@ def delete_session( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Ends a session, releasing server resources associated with it. This will asynchronously trigger cancellation @@ -1220,8 +1262,10 @@ def sample_delete_session(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1269,7 +1313,7 @@ def execute_sql( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> result_set.ResultSet: r"""Executes an SQL statement, returning all results in a single reply. This method cannot be used to return a result set larger @@ -1321,8 +1365,10 @@ def sample_execute_sql(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.ResultSet: @@ -1366,7 +1412,7 @@ def execute_streaming_sql( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[result_set.PartialResultSet]: r"""Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result set as a stream. Unlike @@ -1411,8 +1457,10 @@ def sample_execute_streaming_sql(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.spanner_v1.types.PartialResultSet]: @@ -1459,7 +1507,7 @@ def execute_batch_dml( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.ExecuteBatchDmlResponse: r"""Executes a batch of SQL DML statements. This method allows many statements to be run with lower latency than submitting them @@ -1514,8 +1562,10 @@ def sample_execute_batch_dml(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.spanner_v1.types.ExecuteBatchDmlResponse: @@ -1599,7 +1649,7 @@ def read( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> result_set.ResultSet: r"""Reads rows from the database using key lookups and scans, as a simple key/value style alternative to @@ -1653,8 +1703,10 @@ def sample_read(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.ResultSet: @@ -1698,7 +1750,7 @@ def streaming_read( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[result_set.PartialResultSet]: r"""Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a stream. Unlike @@ -1744,8 +1796,10 @@ def sample_streaming_read(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.spanner_v1.types.PartialResultSet]: @@ -1794,7 +1848,7 @@ def begin_transaction( options: Optional[transaction.TransactionOptions] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> transaction.Transaction: r"""Begins a new transaction. This step can often be skipped: [Read][google.spanner.v1.Spanner.Read], @@ -1849,8 +1903,10 @@ def sample_begin_transaction(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.Transaction: @@ -1911,7 +1967,7 @@ def commit( single_use_transaction: Optional[transaction.TransactionOptions] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> commit_response.CommitResponse: r"""Commits a transaction. 
The request includes the mutations to be applied to rows in the database. @@ -2002,8 +2058,10 @@ def sample_commit(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.CommitResponse: @@ -2070,7 +2128,7 @@ def rollback( transaction_id: Optional[bytes] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Rolls back a transaction, releasing any locks it holds. It is a good idea to call this for any transaction that includes one or @@ -2128,8 +2186,10 @@ def sample_rollback(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2179,7 +2239,7 @@ def partition_query( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.PartitionResponse: r"""Creates a set of partition tokens that can be used to execute a query operation in parallel. Each of the returned partition @@ -2230,8 +2290,10 @@ def sample_partition_query(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.PartitionResponse: @@ -2276,7 +2338,7 @@ def partition_read( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.PartitionResponse: r"""Creates a set of partition tokens that can be used to execute a read operation in parallel. Each of the returned partition @@ -2330,8 +2392,10 @@ def sample_partition_read(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.spanner_v1.types.PartitionResponse: @@ -2380,7 +2444,7 @@ def batch_write( ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[spanner.BatchWriteResponse]: r"""Batches the supplied mutation groups in a collection of efficient transactions. All mutations in a group are @@ -2454,8 +2518,10 @@ def sample_batch_write(): retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]: diff --git a/google/cloud/spanner_v1/services/spanner/pagers.py b/google/cloud/spanner_v1/services/spanner/pagers.py index 54b517f463..2341e99378 100644 --- a/google/cloud/spanner_v1/services/spanner/pagers.py +++ b/google/cloud/spanner_v1/services/spanner/pagers.py @@ -66,7 +66,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -80,8 +80,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = spanner.ListSessionsRequest(request) @@ -140,7 +142,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -154,8 +156,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" self._method = method self._request = spanner.ListSessionsRequest(request) diff --git a/google/cloud/spanner_v1/services/spanner/transports/base.py b/google/cloud/spanner_v1/services/spanner/transports/base.py index 14c8e8d02f..8fa85af24d 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/base.py +++ b/google/cloud/spanner_v1/services/spanner/transports/base.py @@ -30,6 +30,7 @@ from google.cloud.spanner_v1.types import result_set from google.cloud.spanner_v1.types import spanner from google.cloud.spanner_v1.types import transaction +from google.cloud.spanner_v1.metrics.metrics_interceptor import MetricsInterceptor from google.protobuf import empty_pb2 # type: ignore DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( @@ -58,6 +59,7 @@ def __init__( client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, + metrics_interceptor: Optional[MetricsInterceptor] = None, **kwargs, ) -> None: """Instantiate the transport. diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc.py b/google/cloud/spanner_v1/services/spanner/transports/grpc.py index a2afa32174..d325442dc9 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -21,16 +24,96 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.spanner_v1.types import commit_response from google.cloud.spanner_v1.types import result_set from google.cloud.spanner_v1.types import spanner from google.cloud.spanner_v1.types import transaction + +from google.cloud.spanner_v1.metrics.metrics_interceptor import MetricsInterceptor from google.protobuf import empty_pb2 # type: ignore from .base import SpannerTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", 
+ extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class SpannerGrpcTransport(SpannerTransport): """gRPC backend transport for Spanner. @@ -66,6 +149,7 @@ def __init__( client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, + metrics_interceptor: Optional[MetricsInterceptor] = None, ) -> None: """Instantiate the transport. @@ -121,6 +205,7 @@ def __init__( self._grpc_channel = None self._ssl_channel_credentials = ssl_channel_credentials self._stubs: Dict[str, Callable] = {} + self._metrics_interceptor = None if api_mtls_endpoint: warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) @@ -187,7 +272,19 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + # Wrap the gRPC channel with the metric interceptor + if metrics_interceptor is not None: + self._metrics_interceptor = metrics_interceptor + self._grpc_channel = grpc.intercept_channel( + self._grpc_channel, metrics_interceptor + ) + + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -279,7 +376,7 @@ def create_session( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_session" not in self._stubs: - self._stubs["create_session"] = self.grpc_channel.unary_unary( + self._stubs["create_session"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/CreateSession", request_serializer=spanner.CreateSessionRequest.serialize, response_deserializer=spanner.Session.deserialize, @@ -311,7 +408,7 @@ def batch_create_sessions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
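Two wrappers are layered onto the channel here: the optional MetricsInterceptor handed down from the client, then the unconditional _LoggingClientInterceptor, with every stub below rebound from `self.grpc_channel` to the resulting `_logged_channel`. The underlying mechanism is plain `grpc.intercept_channel`; a self-contained sketch of the same pattern, with an illustrative interceptor and target address:

import grpc


class EchoInterceptor(grpc.UnaryUnaryClientInterceptor):
    """Observes every unary-unary RPC before and after it runs."""

    def intercept_unary_unary(self, continuation, client_call_details, request):
        print(f"-> {client_call_details.method}")
        response = continuation(client_call_details, request)  # a grpc.Call/Future
        print(f"<- {client_call_details.method}: {response.code()}")
        return response


channel = grpc.insecure_channel("localhost:50051")
logged_channel = grpc.intercept_channel(channel, EchoInterceptor())
# Stubs built on logged_channel are observed; the raw channel is untouched.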
if "batch_create_sessions" not in self._stubs: - self._stubs["batch_create_sessions"] = self.grpc_channel.unary_unary( + self._stubs["batch_create_sessions"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/BatchCreateSessions", request_serializer=spanner.BatchCreateSessionsRequest.serialize, response_deserializer=spanner.BatchCreateSessionsResponse.deserialize, @@ -337,7 +434,7 @@ def get_session(self) -> Callable[[spanner.GetSessionRequest], spanner.Session]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_session" not in self._stubs: - self._stubs["get_session"] = self.grpc_channel.unary_unary( + self._stubs["get_session"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/GetSession", request_serializer=spanner.GetSessionRequest.serialize, response_deserializer=spanner.Session.deserialize, @@ -363,7 +460,7 @@ def list_sessions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_sessions" not in self._stubs: - self._stubs["list_sessions"] = self.grpc_channel.unary_unary( + self._stubs["list_sessions"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/ListSessions", request_serializer=spanner.ListSessionsRequest.serialize, response_deserializer=spanner.ListSessionsResponse.deserialize, @@ -391,7 +488,7 @@ def delete_session( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_session" not in self._stubs: - self._stubs["delete_session"] = self.grpc_channel.unary_unary( + self._stubs["delete_session"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/DeleteSession", request_serializer=spanner.DeleteSessionRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -430,7 +527,7 @@ def execute_sql( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_sql" not in self._stubs: - self._stubs["execute_sql"] = self.grpc_channel.unary_unary( + self._stubs["execute_sql"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/ExecuteSql", request_serializer=spanner.ExecuteSqlRequest.serialize, response_deserializer=result_set.ResultSet.deserialize, @@ -461,7 +558,7 @@ def execute_streaming_sql( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_streaming_sql" not in self._stubs: - self._stubs["execute_streaming_sql"] = self.grpc_channel.unary_stream( + self._stubs["execute_streaming_sql"] = self._logged_channel.unary_stream( "/google.spanner.v1.Spanner/ExecuteStreamingSql", request_serializer=spanner.ExecuteSqlRequest.serialize, response_deserializer=result_set.PartialResultSet.deserialize, @@ -500,7 +597,7 @@ def execute_batch_dml( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_batch_dml" not in self._stubs: - self._stubs["execute_batch_dml"] = self.grpc_channel.unary_unary( + self._stubs["execute_batch_dml"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/ExecuteBatchDml", request_serializer=spanner.ExecuteBatchDmlRequest.serialize, response_deserializer=spanner.ExecuteBatchDmlResponse.deserialize, @@ -538,7 +635,7 @@ def read(self) -> Callable[[spanner.ReadRequest], result_set.ResultSet]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "read" not in self._stubs: - self._stubs["read"] = self.grpc_channel.unary_unary( + self._stubs["read"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/Read", request_serializer=spanner.ReadRequest.serialize, response_deserializer=result_set.ResultSet.deserialize, @@ -569,7 +666,7 @@ def streaming_read( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "streaming_read" not in self._stubs: - self._stubs["streaming_read"] = self.grpc_channel.unary_stream( + self._stubs["streaming_read"] = self._logged_channel.unary_stream( "/google.spanner.v1.Spanner/StreamingRead", request_serializer=spanner.ReadRequest.serialize, response_deserializer=result_set.PartialResultSet.deserialize, @@ -599,7 +696,7 @@ def begin_transaction( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "begin_transaction" not in self._stubs: - self._stubs["begin_transaction"] = self.grpc_channel.unary_unary( + self._stubs["begin_transaction"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/BeginTransaction", request_serializer=spanner.BeginTransactionRequest.serialize, response_deserializer=transaction.Transaction.deserialize, @@ -640,7 +737,7 @@ def commit( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "commit" not in self._stubs: - self._stubs["commit"] = self.grpc_channel.unary_unary( + self._stubs["commit"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/Commit", request_serializer=spanner.CommitRequest.serialize, response_deserializer=commit_response.CommitResponse.deserialize, @@ -673,7 +770,7 @@ def rollback(self) -> Callable[[spanner.RollbackRequest], empty_pb2.Empty]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "rollback" not in self._stubs: - self._stubs["rollback"] = self.grpc_channel.unary_unary( + self._stubs["rollback"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/Rollback", request_serializer=spanner.RollbackRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -712,7 +809,7 @@ def partition_query( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partition_query" not in self._stubs: - self._stubs["partition_query"] = self.grpc_channel.unary_unary( + self._stubs["partition_query"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/PartitionQuery", request_serializer=spanner.PartitionQueryRequest.serialize, response_deserializer=spanner.PartitionResponse.deserialize, @@ -754,7 +851,7 @@ def partition_read( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partition_read" not in self._stubs: - self._stubs["partition_read"] = self.grpc_channel.unary_unary( + self._stubs["partition_read"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/PartitionRead", request_serializer=spanner.PartitionReadRequest.serialize, response_deserializer=spanner.PartitionResponse.deserialize, @@ -798,7 +895,7 @@ def batch_write( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "batch_write" not in self._stubs: - self._stubs["batch_write"] = self.grpc_channel.unary_stream( + self._stubs["batch_write"] = self._logged_channel.unary_stream( "/google.spanner.v1.Spanner/BatchWrite", request_serializer=spanner.BatchWriteRequest.serialize, response_deserializer=spanner.BatchWriteResponse.deserialize, @@ -806,7 +903,7 @@ def batch_write( return self._stubs["batch_write"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py index 9092ccf61d..475717ae2a 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py +++ b/google/cloud/spanner_v1/services/spanner/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. # import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -23,18 +26,98 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.spanner_v1.types import commit_response from google.cloud.spanner_v1.types import result_set from google.cloud.spanner_v1.types import spanner from google.cloud.spanner_v1.types import transaction +from google.cloud.spanner_v1.metrics.metrics_interceptor import MetricsInterceptor from google.protobuf import empty_pb2 # type: ignore from .base import SpannerTransport, DEFAULT_CLIENT_INFO from .grpc import SpannerGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v 
in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class SpannerGrpcAsyncIOTransport(SpannerTransport): """gRPC AsyncIO backend transport for Spanner. @@ -113,6 +196,7 @@ def __init__( client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, + metrics_interceptor: Optional[MetricsInterceptor] = None, ) -> None: """Instantiate the transport. @@ -234,10 +318,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -287,7 +374,7 @@ def create_session( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_session" not in self._stubs: - self._stubs["create_session"] = self.grpc_channel.unary_unary( + self._stubs["create_session"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/CreateSession", request_serializer=spanner.CreateSessionRequest.serialize, response_deserializer=spanner.Session.deserialize, @@ -320,7 +407,7 @@ def batch_create_sessions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_create_sessions" not in self._stubs: - self._stubs["batch_create_sessions"] = self.grpc_channel.unary_unary( + self._stubs["batch_create_sessions"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/BatchCreateSessions", request_serializer=spanner.BatchCreateSessionsRequest.serialize, response_deserializer=spanner.BatchCreateSessionsResponse.deserialize, @@ -348,7 +435,7 @@ def get_session( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_session" not in self._stubs: - self._stubs["get_session"] = self.grpc_channel.unary_unary( + self._stubs["get_session"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/GetSession", request_serializer=spanner.GetSessionRequest.serialize, response_deserializer=spanner.Session.deserialize, @@ -376,7 +463,7 @@ def list_sessions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
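The asyncio path cannot reuse that approach: grpc.aio has no intercept_channel helper, so the logging interceptor is appended to the already-built channel's (private) _unary_unary_interceptors list and `_logged_channel` simply aliases `_grpc_channel`. The public-API route, sketched with illustrative names, is to pass interceptors at channel construction:

import grpc


class AsyncEchoInterceptor(grpc.aio.UnaryUnaryClientInterceptor):
    async def intercept_unary_unary(self, continuation, client_call_details, request):
        print(f"-> {client_call_details.method}")
        call = await continuation(client_call_details, request)
        return call  # callers can still await the call for its result


channel = grpc.aio.insecure_channel(
    "localhost:50051", interceptors=[AsyncEchoInterceptor()]
)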
if "list_sessions" not in self._stubs: - self._stubs["list_sessions"] = self.grpc_channel.unary_unary( + self._stubs["list_sessions"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/ListSessions", request_serializer=spanner.ListSessionsRequest.serialize, response_deserializer=spanner.ListSessionsResponse.deserialize, @@ -404,7 +491,7 @@ def delete_session( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_session" not in self._stubs: - self._stubs["delete_session"] = self.grpc_channel.unary_unary( + self._stubs["delete_session"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/DeleteSession", request_serializer=spanner.DeleteSessionRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -443,7 +530,7 @@ def execute_sql( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_sql" not in self._stubs: - self._stubs["execute_sql"] = self.grpc_channel.unary_unary( + self._stubs["execute_sql"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/ExecuteSql", request_serializer=spanner.ExecuteSqlRequest.serialize, response_deserializer=result_set.ResultSet.deserialize, @@ -474,7 +561,7 @@ def execute_streaming_sql( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_streaming_sql" not in self._stubs: - self._stubs["execute_streaming_sql"] = self.grpc_channel.unary_stream( + self._stubs["execute_streaming_sql"] = self._logged_channel.unary_stream( "/google.spanner.v1.Spanner/ExecuteStreamingSql", request_serializer=spanner.ExecuteSqlRequest.serialize, response_deserializer=result_set.PartialResultSet.deserialize, @@ -515,7 +602,7 @@ def execute_batch_dml( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_batch_dml" not in self._stubs: - self._stubs["execute_batch_dml"] = self.grpc_channel.unary_unary( + self._stubs["execute_batch_dml"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/ExecuteBatchDml", request_serializer=spanner.ExecuteBatchDmlRequest.serialize, response_deserializer=spanner.ExecuteBatchDmlResponse.deserialize, @@ -553,7 +640,7 @@ def read(self) -> Callable[[spanner.ReadRequest], Awaitable[result_set.ResultSet # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read" not in self._stubs: - self._stubs["read"] = self.grpc_channel.unary_unary( + self._stubs["read"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/Read", request_serializer=spanner.ReadRequest.serialize, response_deserializer=result_set.ResultSet.deserialize, @@ -584,7 +671,7 @@ def streaming_read( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "streaming_read" not in self._stubs: - self._stubs["streaming_read"] = self.grpc_channel.unary_stream( + self._stubs["streaming_read"] = self._logged_channel.unary_stream( "/google.spanner.v1.Spanner/StreamingRead", request_serializer=spanner.ReadRequest.serialize, response_deserializer=result_set.PartialResultSet.deserialize, @@ -616,7 +703,7 @@ def begin_transaction( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "begin_transaction" not in self._stubs: - self._stubs["begin_transaction"] = self.grpc_channel.unary_unary( + self._stubs["begin_transaction"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/BeginTransaction", request_serializer=spanner.BeginTransactionRequest.serialize, response_deserializer=transaction.Transaction.deserialize, @@ -657,7 +744,7 @@ def commit( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "commit" not in self._stubs: - self._stubs["commit"] = self.grpc_channel.unary_unary( + self._stubs["commit"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/Commit", request_serializer=spanner.CommitRequest.serialize, response_deserializer=commit_response.CommitResponse.deserialize, @@ -692,7 +779,7 @@ def rollback( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "rollback" not in self._stubs: - self._stubs["rollback"] = self.grpc_channel.unary_unary( + self._stubs["rollback"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/Rollback", request_serializer=spanner.RollbackRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -733,7 +820,7 @@ def partition_query( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partition_query" not in self._stubs: - self._stubs["partition_query"] = self.grpc_channel.unary_unary( + self._stubs["partition_query"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/PartitionQuery", request_serializer=spanner.PartitionQueryRequest.serialize, response_deserializer=spanner.PartitionResponse.deserialize, @@ -775,7 +862,7 @@ def partition_read( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partition_read" not in self._stubs: - self._stubs["partition_read"] = self.grpc_channel.unary_unary( + self._stubs["partition_read"] = self._logged_channel.unary_unary( "/google.spanner.v1.Spanner/PartitionRead", request_serializer=spanner.PartitionReadRequest.serialize, response_deserializer=spanner.PartitionResponse.deserialize, @@ -819,7 +906,7 @@ def batch_write( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "batch_write" not in self._stubs: - self._stubs["batch_write"] = self.grpc_channel.unary_stream( + self._stubs["batch_write"] = self._logged_channel.unary_stream( "/google.spanner.v1.Spanner/BatchWrite", request_serializer=spanner.BatchWriteRequest.serialize, response_deserializer=spanner.BatchWriteResponse.deserialize, @@ -1047,7 +1134,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/spanner_v1/services/spanner/transports/rest.py b/google/cloud/spanner_v1/services/spanner/transports/rest.py index 6ca5e9eeed..344416c265 100644 --- a/google/cloud/spanner_v1/services/spanner/transports/rest.py +++ b/google/cloud/spanner_v1/services/spanner/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -35,9 +36,9 @@ from google.cloud.spanner_v1.types import result_set from google.cloud.spanner_v1.types import spanner from google.cloud.spanner_v1.types import transaction +from google.cloud.spanner_v1.metrics.metrics_interceptor import MetricsInterceptor from google.protobuf import empty_pb2 # type: ignore - from .rest_base import _BaseSpannerRestTransport from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO @@ -46,6 +47,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -198,8 +207,10 @@ def post_streaming_read(self, response): def pre_batch_create_sessions( self, request: spanner.BatchCreateSessionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner.BatchCreateSessionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner.BatchCreateSessionsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for batch_create_sessions Override in a subclass to manipulate the request or metadata @@ -212,15 +223,42 @@ def post_batch_create_sessions( ) -> spanner.BatchCreateSessionsResponse: """Post-rpc interceptor for batch_create_sessions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_create_sessions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_batch_create_sessions` interceptor runs + before the `post_batch_create_sessions_with_metadata` interceptor. """ return response + def post_batch_create_sessions_with_metadata( + self, + response: spanner.BatchCreateSessionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner.BatchCreateSessionsResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for batch_create_sessions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_batch_create_sessions_with_metadata` + interceptor in new development instead of the `post_batch_create_sessions` interceptor. + When both interceptors are used, this `post_batch_create_sessions_with_metadata` interceptor runs after the + `post_batch_create_sessions` interceptor. The (possibly modified) response returned by + `post_batch_create_sessions` will be passed to + `post_batch_create_sessions_with_metadata`. 
+ """ + return response, metadata + def pre_batch_write( - self, request: spanner.BatchWriteRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.BatchWriteRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.BatchWriteRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.BatchWriteRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for batch_write Override in a subclass to manipulate the request or metadata @@ -233,17 +271,44 @@ def post_batch_write( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for batch_write - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_batch_write_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_batch_write` interceptor runs + before the `post_batch_write_with_metadata` interceptor. """ return response + def post_batch_write_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for batch_write + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_batch_write_with_metadata` + interceptor in new development instead of the `post_batch_write` interceptor. + When both interceptors are used, this `post_batch_write_with_metadata` interceptor runs after the + `post_batch_write` interceptor. The (possibly modified) response returned by + `post_batch_write` will be passed to + `post_batch_write_with_metadata`. + """ + return response, metadata + def pre_begin_transaction( self, request: spanner.BeginTransactionRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner.BeginTransactionRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner.BeginTransactionRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for begin_transaction Override in a subclass to manipulate the request or metadata @@ -256,15 +321,40 @@ def post_begin_transaction( ) -> transaction.Transaction: """Post-rpc interceptor for begin_transaction - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_begin_transaction_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_begin_transaction` interceptor runs + before the `post_begin_transaction_with_metadata` interceptor. """ return response + def post_begin_transaction_with_metadata( + self, + response: transaction.Transaction, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[transaction.Transaction, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for begin_transaction + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_begin_transaction_with_metadata` + interceptor in new development instead of the `post_begin_transaction` interceptor. 
+ When both interceptors are used, this `post_begin_transaction_with_metadata` interceptor runs after the + `post_begin_transaction` interceptor. The (possibly modified) response returned by + `post_begin_transaction` will be passed to + `post_begin_transaction_with_metadata`. + """ + return response, metadata + def pre_commit( - self, request: spanner.CommitRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.CommitRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.CommitRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.CommitRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for commit Override in a subclass to manipulate the request or metadata @@ -277,15 +367,40 @@ def post_commit( ) -> commit_response.CommitResponse: """Post-rpc interceptor for commit - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_commit_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_commit` interceptor runs + before the `post_commit_with_metadata` interceptor. """ return response + def post_commit_with_metadata( + self, + response: commit_response.CommitResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[commit_response.CommitResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for commit + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_commit_with_metadata` + interceptor in new development instead of the `post_commit` interceptor. + When both interceptors are used, this `post_commit_with_metadata` interceptor runs after the + `post_commit` interceptor. The (possibly modified) response returned by + `post_commit` will be passed to + `post_commit_with_metadata`. + """ + return response, metadata + def pre_create_session( - self, request: spanner.CreateSessionRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.CreateSessionRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.CreateSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.CreateSessionRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for create_session Override in a subclass to manipulate the request or metadata @@ -296,15 +411,40 @@ def pre_create_session( def post_create_session(self, response: spanner.Session) -> spanner.Session: """Post-rpc interceptor for create_session - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_create_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_create_session` interceptor runs + before the `post_create_session_with_metadata` interceptor. 
""" return response + def post_create_session_with_metadata( + self, + response: spanner.Session, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.Session, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for create_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_create_session_with_metadata` + interceptor in new development instead of the `post_create_session` interceptor. + When both interceptors are used, this `post_create_session_with_metadata` interceptor runs after the + `post_create_session` interceptor. The (possibly modified) response returned by + `post_create_session` will be passed to + `post_create_session_with_metadata`. + """ + return response, metadata + def pre_delete_session( - self, request: spanner.DeleteSessionRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.DeleteSessionRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.DeleteSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.DeleteSessionRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for delete_session Override in a subclass to manipulate the request or metadata @@ -315,8 +455,8 @@ def pre_delete_session( def pre_execute_batch_dml( self, request: spanner.ExecuteBatchDmlRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner.ExecuteBatchDmlRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.ExecuteBatchDmlRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for execute_batch_dml Override in a subclass to manipulate the request or metadata @@ -329,15 +469,42 @@ def post_execute_batch_dml( ) -> spanner.ExecuteBatchDmlResponse: """Post-rpc interceptor for execute_batch_dml - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_execute_batch_dml_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_execute_batch_dml` interceptor runs + before the `post_execute_batch_dml_with_metadata` interceptor. """ return response + def post_execute_batch_dml_with_metadata( + self, + response: spanner.ExecuteBatchDmlResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + spanner.ExecuteBatchDmlResponse, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for execute_batch_dml + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_execute_batch_dml_with_metadata` + interceptor in new development instead of the `post_execute_batch_dml` interceptor. + When both interceptors are used, this `post_execute_batch_dml_with_metadata` interceptor runs after the + `post_execute_batch_dml` interceptor. The (possibly modified) response returned by + `post_execute_batch_dml` will be passed to + `post_execute_batch_dml_with_metadata`. 
+ """ + return response, metadata + def pre_execute_sql( - self, request: spanner.ExecuteSqlRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.ExecuteSqlRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.ExecuteSqlRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.ExecuteSqlRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for execute_sql Override in a subclass to manipulate the request or metadata @@ -348,15 +515,40 @@ def pre_execute_sql( def post_execute_sql(self, response: result_set.ResultSet) -> result_set.ResultSet: """Post-rpc interceptor for execute_sql - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_execute_sql_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_execute_sql` interceptor runs + before the `post_execute_sql_with_metadata` interceptor. """ return response + def post_execute_sql_with_metadata( + self, + response: result_set.ResultSet, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[result_set.ResultSet, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for execute_sql + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_execute_sql_with_metadata` + interceptor in new development instead of the `post_execute_sql` interceptor. + When both interceptors are used, this `post_execute_sql_with_metadata` interceptor runs after the + `post_execute_sql` interceptor. The (possibly modified) response returned by + `post_execute_sql` will be passed to + `post_execute_sql_with_metadata`. + """ + return response, metadata + def pre_execute_streaming_sql( - self, request: spanner.ExecuteSqlRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.ExecuteSqlRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.ExecuteSqlRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.ExecuteSqlRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for execute_streaming_sql Override in a subclass to manipulate the request or metadata @@ -369,15 +561,42 @@ def post_execute_streaming_sql( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for execute_streaming_sql - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_execute_streaming_sql_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_execute_streaming_sql` interceptor runs + before the `post_execute_streaming_sql_with_metadata` interceptor. """ return response + def post_execute_streaming_sql_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for execute_streaming_sql + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. 
+ + We recommend only using this `post_execute_streaming_sql_with_metadata` + interceptor in new development instead of the `post_execute_streaming_sql` interceptor. + When both interceptors are used, this `post_execute_streaming_sql_with_metadata` interceptor runs after the + `post_execute_streaming_sql` interceptor. The (possibly modified) response returned by + `post_execute_streaming_sql` will be passed to + `post_execute_streaming_sql_with_metadata`. + """ + return response, metadata + def pre_get_session( - self, request: spanner.GetSessionRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.GetSessionRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.GetSessionRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.GetSessionRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for get_session Override in a subclass to manipulate the request or metadata @@ -388,15 +607,40 @@ def pre_get_session( def post_get_session(self, response: spanner.Session) -> spanner.Session: """Post-rpc interceptor for get_session - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_get_session_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_get_session` interceptor runs + before the `post_get_session_with_metadata` interceptor. """ return response + def post_get_session_with_metadata( + self, + response: spanner.Session, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.Session, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for get_session + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_get_session_with_metadata` + interceptor in new development instead of the `post_get_session` interceptor. + When both interceptors are used, this `post_get_session_with_metadata` interceptor runs after the + `post_get_session` interceptor. The (possibly modified) response returned by + `post_get_session` will be passed to + `post_get_session_with_metadata`. + """ + return response, metadata + def pre_list_sessions( - self, request: spanner.ListSessionsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.ListSessionsRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.ListSessionsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.ListSessionsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for list_sessions Override in a subclass to manipulate the request or metadata @@ -409,17 +653,40 @@ def post_list_sessions( ) -> spanner.ListSessionsResponse: """Post-rpc interceptor for list_sessions - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_list_sessions_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_list_sessions` interceptor runs + before the `post_list_sessions_with_metadata` interceptor. 
""" return response + def post_list_sessions_with_metadata( + self, + response: spanner.ListSessionsResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.ListSessionsResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for list_sessions + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_list_sessions_with_metadata` + interceptor in new development instead of the `post_list_sessions` interceptor. + When both interceptors are used, this `post_list_sessions_with_metadata` interceptor runs after the + `post_list_sessions` interceptor. The (possibly modified) response returned by + `post_list_sessions` will be passed to + `post_list_sessions_with_metadata`. + """ + return response, metadata + def pre_partition_query( self, request: spanner.PartitionQueryRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[spanner.PartitionQueryRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.PartitionQueryRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for partition_query Override in a subclass to manipulate the request or metadata @@ -432,15 +699,40 @@ def post_partition_query( ) -> spanner.PartitionResponse: """Post-rpc interceptor for partition_query - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_partition_query_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_partition_query` interceptor runs + before the `post_partition_query_with_metadata` interceptor. """ return response + def post_partition_query_with_metadata( + self, + response: spanner.PartitionResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.PartitionResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for partition_query + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_partition_query_with_metadata` + interceptor in new development instead of the `post_partition_query` interceptor. + When both interceptors are used, this `post_partition_query_with_metadata` interceptor runs after the + `post_partition_query` interceptor. The (possibly modified) response returned by + `post_partition_query` will be passed to + `post_partition_query_with_metadata`. + """ + return response, metadata + def pre_partition_read( - self, request: spanner.PartitionReadRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.PartitionReadRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.PartitionReadRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.PartitionReadRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for partition_read Override in a subclass to manipulate the request or metadata @@ -453,15 +745,40 @@ def post_partition_read( ) -> spanner.PartitionResponse: """Post-rpc interceptor for partition_read - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_partition_read_with_metadata` + interceptor instead. 
+ + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_partition_read` interceptor runs + before the `post_partition_read_with_metadata` interceptor. """ return response + def post_partition_read_with_metadata( + self, + response: spanner.PartitionResponse, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.PartitionResponse, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for partition_read + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_partition_read_with_metadata` + interceptor in new development instead of the `post_partition_read` interceptor. + When both interceptors are used, this `post_partition_read_with_metadata` interceptor runs after the + `post_partition_read` interceptor. The (possibly modified) response returned by + `post_partition_read` will be passed to + `post_partition_read_with_metadata`. + """ + return response, metadata + def pre_read( - self, request: spanner.ReadRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.ReadRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.ReadRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.ReadRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for read Override in a subclass to manipulate the request or metadata @@ -472,15 +789,40 @@ def pre_read( def post_read(self, response: result_set.ResultSet) -> result_set.ResultSet: """Post-rpc interceptor for read - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_read_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_read` interceptor runs + before the `post_read_with_metadata` interceptor. """ return response + def post_read_with_metadata( + self, + response: result_set.ResultSet, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[result_set.ResultSet, Sequence[Tuple[str, Union[str, bytes]]]]: + """Post-rpc interceptor for read + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_read_with_metadata` + interceptor in new development instead of the `post_read` interceptor. + When both interceptors are used, this `post_read_with_metadata` interceptor runs after the + `post_read` interceptor. The (possibly modified) response returned by + `post_read` will be passed to + `post_read_with_metadata`. 
+ """ + return response, metadata + def pre_rollback( - self, request: spanner.RollbackRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.RollbackRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.RollbackRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.RollbackRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for rollback Override in a subclass to manipulate the request or metadata @@ -489,8 +831,10 @@ def pre_rollback( return request, metadata def pre_streaming_read( - self, request: spanner.ReadRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[spanner.ReadRequest, Sequence[Tuple[str, str]]]: + self, + request: spanner.ReadRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[spanner.ReadRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for streaming_read Override in a subclass to manipulate the request or metadata @@ -503,12 +847,37 @@ def post_streaming_read( ) -> rest_streaming.ResponseIterator: """Post-rpc interceptor for streaming_read - Override in a subclass to manipulate the response + DEPRECATED. Please use the `post_streaming_read_with_metadata` + interceptor instead. + + Override in a subclass to read or manipulate the response after it is returned by the Spanner server but before - it is returned to user code. + it is returned to user code. This `post_streaming_read` interceptor runs + before the `post_streaming_read_with_metadata` interceptor. """ return response + def post_streaming_read_with_metadata( + self, + response: rest_streaming.ResponseIterator, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + rest_streaming.ResponseIterator, Sequence[Tuple[str, Union[str, bytes]]] + ]: + """Post-rpc interceptor for streaming_read + + Override in a subclass to read or manipulate the response or metadata after it + is returned by the Spanner server but before it is returned to user code. + + We recommend only using this `post_streaming_read_with_metadata` + interceptor in new development instead of the `post_streaming_read` interceptor. + When both interceptors are used, this `post_streaming_read_with_metadata` interceptor runs after the + `post_streaming_read` interceptor. The (possibly modified) response returned by + `post_streaming_read` will be passed to + `post_streaming_read_with_metadata`. + """ + return response, metadata + @dataclasses.dataclass class SpannerRestStub: @@ -546,6 +915,7 @@ def __init__( url_scheme: str = "https", interceptor: Optional[SpannerRestInterceptor] = None, api_audience: Optional[str] = None, + metrics_interceptor: Optional[MetricsInterceptor] = None, ) -> None: """Instantiate the transport. @@ -634,7 +1004,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.BatchCreateSessionsResponse: r"""Call the batch create sessions method over HTTP. @@ -645,8 +1015,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.spanner.BatchCreateSessionsResponse: @@ -658,6 +1030,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BaseBatchCreateSessions._get_http_options() ) + request, metadata = self._interceptor.pre_batch_create_sessions( request, metadata ) @@ -674,6 +1047,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.BatchCreateSessions", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "BatchCreateSessions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._BatchCreateSessions._get_response( self._host, @@ -695,7 +1095,35 @@ def __call__( pb_resp = spanner.BatchCreateSessionsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_batch_create_sessions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_create_sessions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner.BatchCreateSessionsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.batch_create_sessions", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "BatchCreateSessions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _BatchWrite(_BaseSpannerRestTransport._BaseBatchWrite, SpannerRestStub): @@ -732,7 +1160,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the batch write method over HTTP. @@ -743,8 +1171,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
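Because the _LOGGER.debug(...) calls above pass their payload through extra=, the serviceName, rpcName, httpRequest, and httpResponse keys land as attributes on each LogRecord rather than inside the message. A handler-side filter can therefore consume them without string parsing; handler-level filters also see records propagated up from the child transport loggers, which is what makes this work. A stdlib-only sketch:

    import logging


    class RpcNameFilter(logging.Filter):
        def filter(self, record: logging.LogRecord) -> bool:
            rpc = getattr(record, "rpcName", None)       # attached via `extra`
            http = getattr(record, "httpRequest", None)  # dict built in the hunks above
            if rpc and http:
                record.msg = f"[{rpc}] {http['requestMethod']} {http['requestUrl']}"
                record.args = ()
            return True  # never drop records, only reshape request ones


    handler = logging.StreamHandler()
    handler.addFilter(RpcNameFilter())
    logger = logging.getLogger("google.cloud.spanner_v1")  # assumed prefix, as before
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
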
Returns: ~.spanner.BatchWriteResponse: @@ -754,6 +1184,7 @@ def __call__( """ http_options = _BaseSpannerRestTransport._BaseBatchWrite._get_http_options() + request, metadata = self._interceptor.pre_batch_write(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseBatchWrite._get_transcoded_request( @@ -772,6 +1203,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.BatchWrite", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "BatchWrite", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._BatchWrite._get_response( self._host, @@ -790,7 +1248,12 @@ def __call__( # Return the response resp = rest_streaming.ResponseIterator(response, spanner.BatchWriteResponse) + resp = self._interceptor.post_batch_write(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_batch_write_with_metadata( + resp, response_metadata + ) return resp class _BeginTransaction( @@ -828,7 +1291,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> transaction.Transaction: r"""Call the begin transaction method over HTTP. @@ -839,8 +1302,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.transaction.Transaction: @@ -850,6 +1315,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BaseBeginTransaction._get_http_options() ) + request, metadata = self._interceptor.pre_begin_transaction( request, metadata ) @@ -872,6 +1338,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.BeginTransaction", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "BeginTransaction", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._BeginTransaction._get_response( self._host, @@ -893,7 +1386,33 @@ def __call__( pb_resp = transaction.Transaction.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_begin_transaction(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_begin_transaction_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = transaction.Transaction.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.begin_transaction", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "BeginTransaction", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _Commit(_BaseSpannerRestTransport._BaseCommit, SpannerRestStub): @@ -929,7 +1448,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> commit_response.CommitResponse: r"""Call the commit method over HTTP. @@ -940,8 +1459,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.commit_response.CommitResponse: @@ -951,6 +1472,7 @@ def __call__( """ http_options = _BaseSpannerRestTransport._BaseCommit._get_http_options() + request, metadata = self._interceptor.pre_commit(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseCommit._get_transcoded_request( @@ -967,6 +1489,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.Commit", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "Commit", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._Commit._get_response( self._host, @@ -988,7 +1537,33 @@ def __call__( pb_resp = commit_response.CommitResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_commit(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_commit_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = commit_response.CommitResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.commit", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "Commit", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateSession(_BaseSpannerRestTransport._BaseCreateSession, SpannerRestStub): @@ -1024,7 +1599,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.Session: r"""Call the create session method over HTTP. @@ -1035,8 +1610,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner.Session: @@ -1046,6 +1623,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BaseCreateSession._get_http_options() ) + request, metadata = self._interceptor.pre_create_session(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseCreateSession._get_transcoded_request( @@ -1064,6 +1642,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.CreateSession", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "CreateSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._CreateSession._get_response( self._host, @@ -1085,7 +1690,33 @@ def __call__( pb_resp = spanner.Session.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_create_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner.Session.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.create_session", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "CreateSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteSession(_BaseSpannerRestTransport._BaseDeleteSession, SpannerRestStub): @@ -1120,7 +1751,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete session method over HTTP. @@ -1131,13 +1762,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
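The pre-rpc hooks defined earlier in this file (pre_delete_session among them) keep their (request, metadata) in-and-out shape; only the metadata element type changed. Injecting an extra entry before the call goes out is the typical use, sketched below with a hypothetical key:

    from google.cloud.spanner_v1.services.spanner.transports.rest import (
        SpannerRestInterceptor,
    )


    class StampingInterceptor(SpannerRestInterceptor):
        def pre_delete_session(self, request, metadata):
            # Return a new sequence rather than mutating the caller's; the
            # transport turns the result into the outgoing request headers.
            return request, list(metadata) + [("x-example-origin", "cleanup")]
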
""" http_options = ( _BaseSpannerRestTransport._BaseDeleteSession._get_http_options() ) + request, metadata = self._interceptor.pre_delete_session(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseDeleteSession._get_transcoded_request( @@ -1152,6 +1786,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.DeleteSession", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "DeleteSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._DeleteSession._get_response( self._host, @@ -1202,7 +1863,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.ExecuteBatchDmlResponse: r"""Call the execute batch dml method over HTTP. @@ -1213,8 +1874,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner.ExecuteBatchDmlResponse: @@ -1262,6 +1925,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BaseExecuteBatchDml._get_http_options() ) + request, metadata = self._interceptor.pre_execute_batch_dml( request, metadata ) @@ -1284,6 +1948,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.ExecuteBatchDml", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "ExecuteBatchDml", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._ExecuteBatchDml._get_response( self._host, @@ -1305,7 +1996,33 @@ def __call__( pb_resp = spanner.ExecuteBatchDmlResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_execute_batch_dml(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_execute_batch_dml_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner.ExecuteBatchDmlResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.execute_batch_dml", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "ExecuteBatchDml", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ExecuteSql(_BaseSpannerRestTransport._BaseExecuteSql, SpannerRestStub): @@ -1341,7 +2058,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> result_set.ResultSet: r"""Call the execute sql method over HTTP. @@ -1353,8 +2070,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.result_set.ResultSet: @@ -1364,6 +2083,7 @@ def __call__( """ http_options = _BaseSpannerRestTransport._BaseExecuteSql._get_http_options() + request, metadata = self._interceptor.pre_execute_sql(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseExecuteSql._get_transcoded_request( @@ -1382,6 +2102,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.ExecuteSql", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "ExecuteSql", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._ExecuteSql._get_response( self._host, @@ -1403,7 +2150,33 @@ def __call__( pb_resp = result_set.ResultSet.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_execute_sql(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_execute_sql_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = result_set.ResultSet.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.execute_sql", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "ExecuteSql", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ExecuteStreamingSql( @@ -1442,7 +2215,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the execute streaming sql method over HTTP. @@ -1454,8 +2227,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.result_set.PartialResultSet: @@ -1470,6 +2245,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BaseExecuteStreamingSql._get_http_options() ) + request, metadata = self._interceptor.pre_execute_streaming_sql( request, metadata ) @@ -1486,6 +2262,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.ExecuteStreamingSql", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "ExecuteStreamingSql", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._ExecuteStreamingSql._get_response( self._host, @@ -1506,7 +2309,12 @@ def __call__( resp = rest_streaming.ResponseIterator( response, result_set.PartialResultSet ) + resp = self._interceptor.post_execute_streaming_sql(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_execute_streaming_sql_with_metadata( + resp, response_metadata + ) return resp class _GetSession(_BaseSpannerRestTransport._BaseGetSession, SpannerRestStub): @@ -1541,7 +2349,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.Session: r"""Call the get session method over HTTP. @@ -1552,8 +2360,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
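For the streaming RPCs (ExecuteStreamingSql above; BatchWrite earlier and StreamingRead later in this file share the shape), the transport wraps the raw HTTP response in rest_streaming.ResponseIterator and runs the post interceptors on that iterator before anything is decoded, so interceptors see the stream handle rather than individual messages. Consumption at the client is unchanged; a sketch that assumes an already-created session:

    from google.cloud import spanner_v1

    client = spanner_v1.SpannerClient()

    request = spanner_v1.ExecuteSqlRequest(
        session="projects/p/instances/i/databases/d/sessions/s",  # assumed to exist
        sql="SELECT 1",
    )

    # Each item is a result_set.PartialResultSet, decoded lazily off the wire.
    for partial in client.execute_streaming_sql(request=request):
        print(partial.values)
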
Returns: ~.spanner.Session: @@ -1561,6 +2371,7 @@ def __call__( """ http_options = _BaseSpannerRestTransport._BaseGetSession._get_http_options() + request, metadata = self._interceptor.pre_get_session(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseGetSession._get_transcoded_request( @@ -1575,6 +2386,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.GetSession", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "GetSession", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._GetSession._get_response( self._host, @@ -1595,7 +2433,33 @@ def __call__( pb_resp = spanner.Session.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_session(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_get_session_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner.Session.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.get_session", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "GetSession", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListSessions(_BaseSpannerRestTransport._BaseListSessions, SpannerRestStub): @@ -1630,7 +2494,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.ListSessionsResponse: r"""Call the list sessions method over HTTP. @@ -1641,8 +2505,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner.ListSessionsResponse: @@ -1654,6 +2520,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BaseListSessions._get_http_options() ) + request, metadata = self._interceptor.pre_list_sessions(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseListSessions._get_transcoded_request( @@ -1668,6 +2535,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.ListSessions", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "ListSessions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._ListSessions._get_response( self._host, @@ -1688,7 +2582,33 @@ def __call__( pb_resp = spanner.ListSessionsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_sessions(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_list_sessions_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner.ListSessionsResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.list_sessions", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "ListSessions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _PartitionQuery( @@ -1726,7 +2646,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.PartitionResponse: r"""Call the partition query method over HTTP. @@ -1737,8 +2657,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
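The "Sending request" / "Received response" records above fire only when `CLIENT_LOGGING_SUPPORTED` is true and the module logger is enabled for DEBUG, so they are silent by default. A sketch of switching them on with the standard logging machinery; the logger name is assumed to follow the transport's module path, which is the usual GAPIC convention:

    import logging

    # Any configuration that leaves the logger enabled for DEBUG works;
    # basicConfig on the root logger is the simplest.
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("google.cloud.spanner_v1").setLevel(logging.DEBUG)

google-api-core also exposes a GOOGLE_SDK_PYTHON_LOGGING_SCOPE environment variable for scoping these structured client logs without touching application logging config.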
Returns: ~.spanner.PartitionResponse: @@ -1752,6 +2674,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BasePartitionQuery._get_http_options() ) + request, metadata = self._interceptor.pre_partition_query(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BasePartitionQuery._get_transcoded_request( @@ -1770,6 +2693,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.PartitionQuery", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "PartitionQuery", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._PartitionQuery._get_response( self._host, @@ -1791,7 +2741,33 @@ def __call__( pb_resp = spanner.PartitionResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partition_query(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_partition_query_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner.PartitionResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.partition_query", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "PartitionQuery", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _PartitionRead(_BaseSpannerRestTransport._BasePartitionRead, SpannerRestStub): @@ -1827,7 +2803,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> spanner.PartitionResponse: r"""Call the partition read method over HTTP. @@ -1838,8 +2814,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.spanner.PartitionResponse: @@ -1853,6 +2831,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BasePartitionRead._get_http_options() ) + request, metadata = self._interceptor.pre_partition_read(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BasePartitionRead._get_transcoded_request( @@ -1871,6 +2850,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.PartitionRead", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "PartitionRead", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._PartitionRead._get_response( self._host, @@ -1892,7 +2898,33 @@ def __call__( pb_resp = spanner.PartitionResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partition_read(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_partition_read_with_metadata( + resp, response_metadata + ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = spanner.PartitionResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.partition_read", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "PartitionRead", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _Read(_BaseSpannerRestTransport._BaseRead, SpannerRestStub): @@ -1928,7 +2960,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> result_set.ResultSet: r"""Call the read method over HTTP. @@ -1940,8 +2972,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
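Alongside the existing `post_*` interceptor hooks, each RPC now gains a `post_*_with_metadata` variant that receives the decoded response together with the HTTP response headers (as a list of key/value tuples) and must return both. A sketch of a custom interceptor; the import path follows the generated transport layout and should be treated as an assumption:

    from google.cloud import spanner_v1
    from google.cloud.spanner_v1.services.spanner.transports.rest import (
        SpannerRestInterceptor,
        SpannerRestTransport,
    )

    class HeaderLoggingInterceptor(SpannerRestInterceptor):
        def post_partition_read_with_metadata(self, response, metadata):
            # metadata holds the (header, value) pairs built from the HTTP
            # response; returning it unchanged keeps default behavior.
            print("PartitionRead response headers:", dict(metadata))
            return response, metadata

    # Assumes application default credentials are available.
    client = spanner_v1.SpannerClient(
        transport=SpannerRestTransport(interceptor=HeaderLoggingInterceptor())
    )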
Returns: ~.result_set.ResultSet: @@ -1951,6 +2985,7 @@ def __call__( """ http_options = _BaseSpannerRestTransport._BaseRead._get_http_options() + request, metadata = self._interceptor.pre_read(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseRead._get_transcoded_request( @@ -1967,6 +3002,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.Read", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "Read", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._Read._get_response( self._host, @@ -1988,7 +3050,31 @@ def __call__( pb_resp = result_set.ResultSet.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_read(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_read_with_metadata(resp, response_metadata) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = result_set.ResultSet.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.spanner_v1.SpannerClient.read", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "Read", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _Rollback(_BaseSpannerRestTransport._BaseRollback, SpannerRestStub): @@ -2024,7 +3110,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the rollback method over HTTP. @@ -2035,11 +3121,14 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = _BaseSpannerRestTransport._BaseRollback._get_http_options() + request, metadata = self._interceptor.pre_rollback(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseRollback._get_transcoded_request( @@ -2058,6 +3147,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.Rollback", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "Rollback", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._Rollback._get_response( self._host, @@ -2108,7 +3224,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the streaming read method over HTTP. @@ -2120,8 +3236,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.result_set.PartialResultSet: @@ -2136,6 +3254,7 @@ def __call__( http_options = ( _BaseSpannerRestTransport._BaseStreamingRead._get_http_options() ) + request, metadata = self._interceptor.pre_streaming_read(request, metadata) transcoded_request = ( _BaseSpannerRestTransport._BaseStreamingRead._get_transcoded_request( @@ -2154,6 +3273,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.spanner_v1.SpannerClient.StreamingRead", + extra={ + "serviceName": "google.spanner.v1.Spanner", + "rpcName": "StreamingRead", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = SpannerRestTransport._StreamingRead._get_response( self._host, @@ -2174,7 +3320,12 @@ def __call__( resp = rest_streaming.ResponseIterator( response, result_set.PartialResultSet ) + resp = self._interceptor.post_streaming_read(resp) + response_metadata = [(k, str(v)) for k, v in response.headers.items()] + resp, _ = self._interceptor.post_streaming_read_with_metadata( + resp, response_metadata + ) return resp @property diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index ccc0c4ebdc..f18ba57582 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -39,6 +39,7 @@ from google.cloud.spanner_v1.batch import Batch from google.cloud.spanner_v1.snapshot import Snapshot from google.cloud.spanner_v1.transaction import Transaction +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture DEFAULT_RETRY_TIMEOUT_SECS = 30 @@ -165,7 +166,8 @@ def create(self): self, self._labels, observability_options=observability_options, - ): + metadata=metadata, + ), MetricsCapture(): session_pb = api.create_session( request=request, metadata=metadata, @@ -204,8 +206,11 @@ def exists(self): observability_options = getattr(self._database, "observability_options", None) with trace_call( - "CloudSpanner.GetSession", self, observability_options=observability_options - ) as span: + "CloudSpanner.GetSession", + self, + observability_options=observability_options, + metadata=metadata, + ) as span, MetricsCapture(): try: api.get_session(name=self.name, metadata=metadata) if span: @@ -248,7 +253,8 @@ def delete(self): "session.name": self.name, }, observability_options=observability_options, - ): + metadata=metadata, + ), MetricsCapture(): api.delete_session(name=self.name, metadata=metadata) def ping(self): @@ -446,6 +452,7 @@ def run_in_transaction(self, func, *args, **kw): from being recorded in change streams with the DDL option `allow_txn_exclusion=true`. This does not exclude the transaction from being recorded in the change streams with the DDL option `allow_txn_exclusion` being false or unset. + "isolation_level" sets the isolation level for the transaction. :rtype: Any :returns: The return value of ``func``. 
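`run_in_transaction` now pops an `isolation_level` keyword and assigns it to the transaction before it begins, alongside the existing `exclude_txn_from_change_streams` option. A minimal sketch, assuming placeholder instance, database, and table names:

    from google.cloud import spanner
    from google.cloud.spanner_v1 import TransactionOptions

    client = spanner.Client()
    database = client.instance("my-instance").database("my-database")

    def update_singer(transaction):
        transaction.execute_update(
            "UPDATE Singers SET LastName = 'Lee' WHERE SingerId = 1"
        )

    # Database.run_in_transaction forwards keyword arguments to
    # Session.run_in_transaction, which sets txn.isolation_level.
    database.run_in_transaction(
        update_singer,
        isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ,
    )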
@@ -460,6 +467,8 @@ def run_in_transaction(self, func, *args, **kw): exclude_txn_from_change_streams = kw.pop( "exclude_txn_from_change_streams", None ) + isolation_level = kw.pop("isolation_level", None) + attempts = 0 observability_options = getattr(self._database, "observability_options", None) @@ -467,7 +476,7 @@ def run_in_transaction(self, func, *args, **kw): "CloudSpanner.Session.run_in_transaction", self, observability_options=observability_options, - ) as span: + ) as span, MetricsCapture(): while True: if self._transaction is None: txn = self.transaction() @@ -475,6 +484,7 @@ def run_in_transaction(self, func, *args, **kw): txn.exclude_txn_from_change_streams = ( exclude_txn_from_change_streams ) + txn.isolation_level = isolation_level else: txn = self._transaction diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index f9edbe96fa..3b18d2c855 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -43,6 +43,8 @@ from google.cloud.spanner_v1.streamed import StreamedResultSet from google.cloud.spanner_v1 import RequestOptions +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture + _STREAM_RESUMPTION_INTERNAL_ERROR_MESSAGES = ( "RST_STREAM", "Received unexpected EOS on DATA frame from server", @@ -52,6 +54,7 @@ def _restart_on_unavailable( method, request, + metadata=None, trace_name=None, session=None, attributes=None, @@ -96,8 +99,9 @@ def _restart_on_unavailable( session, attributes, observability_options=observability_options, - ): - iterator = method(request=request) + metadata=metadata, + ), MetricsCapture(): + iterator = method(request=request, metadata=metadata) for item in iterator: item_buffer.append(item) # Setting the transaction id because the transaction begin was inlined for first rpc. @@ -119,7 +123,8 @@ def _restart_on_unavailable( session, attributes, observability_options=observability_options, - ): + metadata=metadata, + ), MetricsCapture(): request.resume_token = resume_token if transaction is not None: transaction_selector = transaction._make_txn_selector() @@ -139,7 +144,8 @@ def _restart_on_unavailable( session, attributes, observability_options=observability_options, - ): + metadata=metadata, + ), MetricsCapture(): request.resume_token = resume_token if transaction is not None: transaction_selector = transaction._make_txn_selector() @@ -340,6 +346,7 @@ def read( iterator = _restart_on_unavailable( restart, request, + metadata, f"CloudSpanner.{type(self).__name__}.read", self._session, trace_attributes, @@ -362,6 +369,7 @@ def read( iterator = _restart_on_unavailable( restart, request, + metadata, f"CloudSpanner.{type(self).__name__}.read", self._session, trace_attributes, @@ -389,6 +397,7 @@ def execute_sql( query_mode=None, query_options=None, request_options=None, + last_statement=False, partition=None, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, @@ -432,6 +441,19 @@ def execute_sql( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.RequestOptions`. + :type last_statement: bool + :param last_statement: + If set to true, this option marks the end of the transaction. The + transaction should be committed or aborted after this statement + executes, and attempts to execute any other requests against this + transaction (including reads and queries) will be rejected. Mixing + mutations with statements that are marked as the last statement is + not allowed. 
+ For DML statements, setting this option may cause some error + reporting to be deferred until commit time (e.g. validation of + unique constraints). Given this, successful execution of a DML + statement should not be assumed until the transaction commits. + :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`. @@ -536,6 +558,7 @@ def execute_sql( seqno=self._execute_sql_count, query_options=query_options, request_options=request_options, + last_statement=last_statement, data_boost_enabled=data_boost_enabled, directed_read_options=directed_read_options, ) @@ -556,6 +579,7 @@ def execute_sql( return self._get_streamed_result_set( restart, request, + metadata, trace_attributes, column_info, observability_options, @@ -565,6 +589,7 @@ def execute_sql( return self._get_streamed_result_set( restart, request, + metadata, trace_attributes, column_info, observability_options, @@ -575,6 +600,7 @@ def _get_streamed_result_set( self, restart, request, + metadata, trace_attributes, column_info, observability_options=None, @@ -583,6 +609,7 @@ def _get_streamed_result_set( iterator = _restart_on_unavailable( restart, request, + metadata, f"CloudSpanner.{type(self).__name__}.execute_sql", self._session, trace_attributes, @@ -689,7 +716,8 @@ def partition_read( self._session, extra_attributes=trace_attributes, observability_options=getattr(database, "observability_options", None), - ): + metadata=metadata, + ), MetricsCapture(): method = functools.partial( api.partition_read, request=request, @@ -792,7 +820,8 @@ def partition_query( self._session, trace_attributes, observability_options=getattr(database, "observability_options", None), - ): + metadata=metadata, + ), MetricsCapture(): method = functools.partial( api.partition_query, request=request, @@ -938,7 +967,8 @@ def begin(self): f"CloudSpanner.{type(self).__name__}.begin", self._session, observability_options=getattr(database, "observability_options", None), - ): + metadata=metadata, + ), MetricsCapture(): method = functools.partial( api.begin_transaction, session=self._session.name, diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index cc59789248..2f52aaa144 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -16,6 +16,7 @@ import functools import threading from google.protobuf.struct_pb2 import Struct +from typing import Optional from google.cloud.spanner_v1._helpers import ( _make_value_pb, @@ -24,6 +25,7 @@ _metadata_with_leader_aware_routing, _retry, _check_rst_stream_error, + _merge_Transaction_Options, ) from google.cloud.spanner_v1 import CommitRequest from google.cloud.spanner_v1 import ExecuteBatchDmlRequest @@ -34,9 +36,10 @@ from google.cloud.spanner_v1.batch import _BatchBase from google.cloud.spanner_v1._opentelemetry_tracing import add_span_event, trace_call from google.cloud.spanner_v1 import RequestOptions +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture from google.api_core import gapic_v1 from google.api_core.exceptions import InternalServerError -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import Any @@ -58,6 +61,7 @@ class Transaction(_SnapshotBase, _BatchBase): _lock = threading.Lock() _read_only = False exclude_txn_from_change_streams = False + isolation_level = TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED def __init__(self, session): if session._transaction is not None: @@ -88,12 
+92,17 @@ def _make_txn_selector(self): self._check_state() if self._transaction_id is None: - return TransactionSelector( - begin=TransactionOptions( - read_write=TransactionOptions.ReadWrite(), - exclude_txn_from_change_streams=self.exclude_txn_from_change_streams, - ) + txn_options = TransactionOptions( + read_write=TransactionOptions.ReadWrite(), + exclude_txn_from_change_streams=self.exclude_txn_from_change_streams, + isolation_level=self.isolation_level, + ) + + txn_options = _merge_Transaction_Options( + self._session._database.default_transaction_options.default_read_write_transaction_options, + txn_options, ) + return TransactionSelector(begin=txn_options) else: return TransactionSelector(id=self._transaction_id) @@ -101,6 +110,7 @@ def _execute_request( self, method, request, + metadata, trace_name=None, session=None, attributes=None, @@ -117,8 +127,12 @@ def _execute_request( transaction = self._make_txn_selector() request.transaction = transaction with trace_call( - trace_name, session, attributes, observability_options=observability_options - ): + trace_name, + session, + attributes, + observability_options=observability_options, + metadata=metadata, + ), MetricsCapture(): method = functools.partial(method, request=request) response = _retry( method, @@ -154,13 +168,19 @@ def begin(self): txn_options = TransactionOptions( read_write=TransactionOptions.ReadWrite(), exclude_txn_from_change_streams=self.exclude_txn_from_change_streams, + isolation_level=self.isolation_level, + ) + txn_options = _merge_Transaction_Options( + database.default_transaction_options.default_read_write_transaction_options, + txn_options, ) observability_options = getattr(database, "observability_options", None) with trace_call( f"CloudSpanner.{type(self).__name__}.begin", self._session, observability_options=observability_options, - ) as span: + metadata=metadata, + ) as span, MetricsCapture(): method = functools.partial( api.begin_transaction, session=self._session.name, @@ -202,7 +222,8 @@ def rollback(self): f"CloudSpanner.{type(self).__name__}.rollback", self._session, observability_options=observability_options, - ): + metadata=metadata, + ), MetricsCapture(): method = functools.partial( api.rollback, session=self._session.name, @@ -245,27 +266,25 @@ def commit( database = self._session._database trace_attributes = {"num_mutations": len(self._mutations)} observability_options = getattr(database, "observability_options", None) + api = database.spanner_api + metadata = _metadata_with_prefix(database.name) + if database._route_to_leader_enabled: + metadata.append( + _metadata_with_leader_aware_routing(database._route_to_leader_enabled) + ) with trace_call( f"CloudSpanner.{type(self).__name__}.commit", self._session, trace_attributes, observability_options, - ) as span: + metadata=metadata, + ) as span, MetricsCapture(): self._check_state() if self._transaction_id is None and len(self._mutations) > 0: self.begin() elif self._transaction_id is None and len(self._mutations) == 0: raise ValueError("Transaction is not begun") - api = database.spanner_api - metadata = _metadata_with_prefix(database.name) - if database._route_to_leader_enabled: - metadata.append( - _metadata_with_leader_aware_routing( - database._route_to_leader_enabled - ) - ) - if request_options is None: request_options = RequestOptions() elif type(request_options) is dict: @@ -349,6 +368,7 @@ def execute_update( query_mode=None, query_options=None, request_options=None, + last_statement=False, *, retry=gapic_v1.method.DEFAULT, 
timeout=gapic_v1.method.DEFAULT, @@ -385,6 +405,19 @@ def execute_update( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.RequestOptions`. + :type last_statement: bool + :param last_statement: + If set to true, this option marks the end of the transaction. The + transaction should be committed or aborted after this statement + executes, and attempts to execute any other requests against this + transaction (including reads and queries) will be rejected. Mixing + mutations with statements that are marked as the last statement is + not allowed. + For DML statements, setting this option may cause some error + reporting to be deferred until commit time (e.g. validation of + unique constraints). Given this, successful execution of a DML + statement should not be assumed until the transaction commits. + :type retry: :class:`~google.api_core.retry.Retry` :param retry: (Optional) The retry settings for this request. @@ -433,6 +466,7 @@ def execute_update( query_options=query_options, seqno=seqno, request_options=request_options, + last_statement=last_statement, ) method = functools.partial( @@ -449,6 +483,7 @@ def execute_update( response = self._execute_request( method, request, + metadata, f"CloudSpanner.{type(self).__name__}.execute_update", self._session, trace_attributes, @@ -466,6 +501,7 @@ def execute_update( response = self._execute_request( method, request, + metadata, f"CloudSpanner.{type(self).__name__}.execute_update", self._session, trace_attributes, @@ -478,6 +514,7 @@ def batch_update( self, statements, request_options=None, + last_statement=False, *, retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, @@ -502,6 +539,19 @@ def batch_update( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.RequestOptions`. + :type last_statement: bool + :param last_statement: + If set to true, this option marks the end of the transaction. The + transaction should be committed or aborted after this statement + executes, and attempts to execute any other requests against this + transaction (including reads and queries) will be rejected. Mixing + mutations with statements that are marked as the last statement is + not allowed. + For DML statements, setting this option may cause some error + reporting to be deferred until commit time (e.g. validation of + unique constraints). Given this, successful execution of a DML + statement should not be assumed until the transaction commits. + :type retry: :class:`~google.api_core.retry.Retry` :param retry: (Optional) The retry settings for this request. 
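Since `last_statement` marks the point after which only commit or rollback may follow, it is typically set on the final DML statement of a transaction function. A sketch of the pattern (the `Accounts` table and the `database` handle are placeholders), illustrating the deferred error reporting the docstring warns about:

    def transfer(transaction):
        transaction.execute_update(
            "UPDATE Accounts SET Balance = Balance - 100 WHERE AccountId = 1"
        )
        transaction.execute_update(
            "UPDATE Accounts SET Balance = Balance + 100 WHERE AccountId = 2",
            last_statement=True,  # no further reads or queries are allowed
        )
        # Errors such as constraint violations for the last statement may
        # surface at commit time rather than from execute_update itself.

    database.run_in_transaction(transfer)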
@@ -558,6 +608,7 @@ def batch_update( statements=parsed, seqno=seqno, request_options=request_options, + last_statements=last_statement, ) method = functools.partial( @@ -574,6 +625,7 @@ def batch_update( response = self._execute_request( method, request, + metadata, "CloudSpanner.DMLTransaction", self._session, trace_attributes, @@ -592,6 +644,7 @@ def batch_update( response = self._execute_request( method, request, + metadata, "CloudSpanner.DMLTransaction", self._session, trace_attributes, @@ -621,3 +674,22 @@ class BatchTransactionId: transaction_id: str session_id: str read_timestamp: Any + + +@dataclass +class DefaultTransactionOptions: + isolation_level: str = TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED + _defaultReadWriteTransactionOptions: Optional[TransactionOptions] = field( + init=False, repr=False + ) + + def __post_init__(self): + """Initialize _defaultReadWriteTransactionOptions automatically""" + self._defaultReadWriteTransactionOptions = TransactionOptions( + isolation_level=self.isolation_level + ) + + @property + def default_read_write_transaction_options(self) -> TransactionOptions: + """Public accessor for _defaultReadWriteTransactionOptions""" + return self._defaultReadWriteTransactionOptions diff --git a/google/cloud/spanner_v1/types/spanner.py b/google/cloud/spanner_v1/types/spanner.py index dedc82096d..978362d357 100644 --- a/google/cloud/spanner_v1/types/spanner.py +++ b/google/cloud/spanner_v1/types/spanner.py @@ -651,6 +651,20 @@ class ExecuteSqlRequest(proto.Message): If the field is set to ``true`` but the request does not set ``partition_token``, the API returns an ``INVALID_ARGUMENT`` error. + last_statement (bool): + Optional. If set to true, this statement + marks the end of the transaction. The + transaction should be committed or aborted after + this statement executes, and attempts to execute + any other requests against this transaction + (including reads and queries) will be rejected. + + For DML statements, setting this option may + cause some error reporting to be deferred until + commit time (e.g. validation of unique + constraints). Given this, successful execution + of a DML statement should not be assumed until a + subsequent Commit call completes successfully. """ class QueryMode(proto.Enum): @@ -813,6 +827,10 @@ class QueryOptions(proto.Message): proto.BOOL, number=16, ) + last_statement: bool = proto.Field( + proto.BOOL, + number=17, + ) class ExecuteBatchDmlRequest(proto.Message): @@ -854,6 +872,20 @@ class ExecuteBatchDmlRequest(proto.Message): yield the same response as the first execution. request_options (google.cloud.spanner_v1.types.RequestOptions): Common options for this request. + last_statements (bool): + Optional. If set to true, this request marks + the end of the transaction. The transaction + should be committed or aborted after these + statements execute, and attempts to execute any + other requests against this transaction + (including reads and queries) will be rejected. + + Setting this option may cause some error + reporting to be deferred until commit time (e.g. + validation of unique constraints). Given this, + successful execution of statements should not be + assumed until a subsequent Commit call completes + successfully. 
""" class Statement(proto.Message): @@ -932,6 +964,10 @@ class Statement(proto.Message): number=5, message="RequestOptions", ) + last_statements: bool = proto.Field( + proto.BOOL, + number=6, + ) class ExecuteBatchDmlResponse(proto.Message): diff --git a/google/cloud/spanner_v1/types/transaction.py b/google/cloud/spanner_v1/types/transaction.py index 6599d26172..0a25f1ea15 100644 --- a/google/cloud/spanner_v1/types/transaction.py +++ b/google/cloud/spanner_v1/types/transaction.py @@ -419,8 +419,52 @@ class TransactionOptions(proto.Message): only be specified for read-write or partitioned-dml transactions, otherwise the API will return an ``INVALID_ARGUMENT`` error. + isolation_level (google.cloud.spanner_v1.types.TransactionOptions.IsolationLevel): + Isolation level for the transaction. """ + class IsolationLevel(proto.Enum): + r"""``IsolationLevel`` is used when setting ``isolation_level`` for a + transaction. + + Values: + ISOLATION_LEVEL_UNSPECIFIED (0): + Default value. + + If the value is not specified, the ``SERIALIZABLE`` + isolation level is used. + SERIALIZABLE (1): + All transactions appear as if they executed + in a serial order, even if some of the reads, + writes, and other operations of distinct + transactions actually occurred in parallel. + Spanner assigns commit timestamps that reflect + the order of committed transactions to implement + this property. Spanner offers a stronger + guarantee than serializability called external + consistency. For further details, please refer + to + https://cloud.google.com/spanner/docs/true-time-external-consistency#serializability. + REPEATABLE_READ (2): + All reads performed during the transaction observe a + consistent snapshot of the database, and the transaction + will only successfully commit in the absence of conflicts + between its updates and any concurrent updates that have + occurred since that snapshot. Consequently, in contrast to + ``SERIALIZABLE`` transactions, only write-write conflicts + are detected in snapshot transactions. + + This isolation level does not support Read-only and + Partitioned DML transactions. + + When ``REPEATABLE_READ`` is specified on a read-write + transaction, the locking semantics default to + ``OPTIMISTIC``. + """ + ISOLATION_LEVEL_UNSPECIFIED = 0 + SERIALIZABLE = 1 + REPEATABLE_READ = 2 + class ReadWrite(proto.Message): r"""Message type to initiate a read-write transaction. Currently this transaction type has no options. @@ -445,19 +489,34 @@ class ReadLockMode(proto.Enum): READ_LOCK_MODE_UNSPECIFIED (0): Default value. - If the value is not specified, the pessimistic - read lock is used. + - If isolation level is ``REPEATABLE_READ``, then it is an + error to specify ``read_lock_mode``. Locking semantics + default to ``OPTIMISTIC``. No validation checks are done + for reads, except for: + + 1. reads done as part of queries that use + ``SELECT FOR UPDATE`` + 2. reads done as part of statements with a + ``LOCK_SCANNED_RANGES`` hint + 3. reads done as part of DML statements to validate that + the data that was served at the snapshot time is + unchanged at commit time. + + - At all other isolation levels, if ``read_lock_mode`` is + the default value, then pessimistic read lock is used. PESSIMISTIC (1): Pessimistic lock mode. - Read locks are acquired immediately on read. + Read locks are acquired immediately on read. Semantics + described only applies to ``SERIALIZABLE`` isolation. OPTIMISTIC (2): Optimistic lock mode. - Locks for reads within the transaction are not - acquired on read. 
Instead the locks are acquired - on a commit to validate that read/queried data - has not changed since the transaction started. + Locks for reads within the transaction are not acquired on + read. Instead the locks are acquired on a commit to validate + that read/queried data has not changed since the transaction + started. Semantics described only applies to + ``SERIALIZABLE`` isolation. """ READ_LOCK_MODE_UNSPECIFIED = 0 PESSIMISTIC = 1 @@ -616,6 +675,11 @@ class ReadOnly(proto.Message): proto.BOOL, number=5, ) + isolation_level: IsolationLevel = proto.Field( + proto.ENUM, + number=6, + enum=IsolationLevel, + ) class Transaction(proto.Message): diff --git a/google/cloud/spanner_v1/types/type.py b/google/cloud/spanner_v1/types/type.py index 4b86fc063f..e47c1077bb 100644 --- a/google/cloud/spanner_v1/types/type.py +++ b/google/cloud/spanner_v1/types/type.py @@ -108,6 +108,9 @@ class TypeCode(proto.Enum): integer. For example, ``P1Y2M3DT4H5M6.5S`` represents time duration of 1 year, 2 months, 3 days, 4 hours, 5 minutes, and 6.5 seconds. + UUID (17): + Encoded as ``string``, in lower-case hexa-decimal format, as + described in RFC 9562, section 4. """ TYPE_CODE_UNSPECIFIED = 0 BOOL = 1 @@ -125,6 +128,7 @@ class TypeCode(proto.Enum): PROTO = 13 ENUM = 14 INTERVAL = 16 + UUID = 17 class TypeAnnotationCode(proto.Enum): diff --git a/noxfile.py b/noxfile.py index f32c24f1e3..cb683afd7e 100644 --- a/noxfile.py +++ b/noxfile.py @@ -294,8 +294,18 @@ def install_systemtest_dependencies(session, *constraints): @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -@nox.parametrize("database_dialect", ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]) -def system(session, database_dialect): +@nox.parametrize( + "protobuf_implementation,database_dialect", + [ + ("python", "GOOGLE_STANDARD_SQL"), + ("python", "POSTGRESQL"), + ("upb", "GOOGLE_STANDARD_SQL"), + ("upb", "POSTGRESQL"), + ("cpp", "GOOGLE_STANDARD_SQL"), + ("cpp", "POSTGRESQL"), + ], +) +def system(session, protobuf_implementation, database_dialect): """Run the system test suite.""" constraints_path = str( CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" @@ -329,6 +339,12 @@ def system(session, database_dialect): install_systemtest_dependencies(session, "-c", constraints_path) + # TODO(https://github.com/googleapis/synthtool/issues/1976): + # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped. + # The 'cpp' implementation requires Protobuf<4. + if protobuf_implementation == "cpp": + session.install("protobuf<4") + # Run py.test against the system tests. 
if system_test_exists: session.run( @@ -338,6 +354,7 @@ def system(session, database_dialect): system_test_path, *session.posargs, env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, "SPANNER_DATABASE_DIALECT": database_dialect, "SKIP_BACKUP_TESTS": "true", }, @@ -350,6 +367,7 @@ def system(session, database_dialect): system_test_folder_path, *session.posargs, env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, "SPANNER_DATABASE_DIALECT": database_dialect, "SKIP_BACKUP_TESTS": "true", }, diff --git a/owlbot.py b/owlbot.py index e7fb391c2a..40443971d1 100644 --- a/owlbot.py +++ b/owlbot.py @@ -238,8 +238,18 @@ def place_before(path, text, *before_text, escape=None): """@nox.session\(python=SYSTEM_TEST_PYTHON_VERSIONS\) def system\(session\):""", """@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS) -@nox.parametrize("database_dialect", ["GOOGLE_STANDARD_SQL", "POSTGRESQL"]) -def system(session, database_dialect):""", +@nox.parametrize( + "protobuf_implementation,database_dialect", + [ + ("python", "GOOGLE_STANDARD_SQL"), + ("python", "POSTGRESQL"), + ("upb", "GOOGLE_STANDARD_SQL"), + ("upb", "POSTGRESQL"), + ("cpp", "GOOGLE_STANDARD_SQL"), + ("cpp", "POSTGRESQL"), + ], +) +def system(session, protobuf_implementation, database_dialect):""", ) s.replace( @@ -248,6 +258,7 @@ def system(session, database_dialect):""", \)""", """*session.posargs, env={ + "PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": protobuf_implementation, "SPANNER_DATABASE_DIALECT": database_dialect, "SKIP_BACKUP_TESTS": "true", }, @@ -345,6 +356,19 @@ def mockserver(session): escape="()_*:", ) +s.replace( + "noxfile.py", + "install_systemtest_dependencies\(session, \"-c\", constraints_path\)", + """install_systemtest_dependencies(session, "-c", constraints_path) + + # TODO(https://github.com/googleapis/synthtool/issues/1976): + # Remove the 'cpp' implementation once support for Protobuf 3.x is dropped. + # The 'cpp' implementation requires Protobuf<4. 
+ if protobuf_implementation == "cpp": + session.install("protobuf<4") +""" +) + place_before( "noxfile.py", "UNIT_TEST_PYTHON_VERSIONS: List[str] = [", diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json index aef1015b66..fc77bc1740 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json @@ -8,9 +8,178 @@ ], "language": "PYTHON", "name": "google-cloud-spanner-admin-database", - "version": "3.52.0" + "version": "3.53.0" }, "snippets": [ + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.spanner_admin_database_v1.DatabaseAdminAsyncClient", + "shortName": "DatabaseAdminAsyncClient" + }, + "fullName": "google.cloud.spanner_admin_database_v1.DatabaseAdminAsyncClient.add_split_points", + "method": { + "fullName": "google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints", + "service": { + "fullName": "google.spanner.admin.database.v1.DatabaseAdmin", + "shortName": "DatabaseAdmin" + }, + "shortName": "AddSplitPoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest" + }, + { + "name": "database", + "type": "str" + }, + { + "name": "split_points", + "type": "MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse", + "shortName": "add_split_points" + }, + "description": "Sample for AddSplitPoints", + "file": "spanner_v1_generated_database_admin_add_split_points_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "spanner_v1_generated_DatabaseAdmin_AddSplitPoints_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_database_admin_add_split_points_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.spanner_admin_database_v1.DatabaseAdminClient", + "shortName": "DatabaseAdminClient" + }, + "fullName": "google.cloud.spanner_admin_database_v1.DatabaseAdminClient.add_split_points", + "method": { + "fullName": "google.spanner.admin.database.v1.DatabaseAdmin.AddSplitPoints", + "service": { + "fullName": "google.spanner.admin.database.v1.DatabaseAdmin", + "shortName": "DatabaseAdmin" + }, + "shortName": "AddSplitPoints" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.spanner_admin_database_v1.types.AddSplitPointsRequest" + }, + { + "name": "database", + "type": "str" + }, + { + "name": "split_points", + "type": "MutableSequence[google.cloud.spanner_admin_database_v1.types.SplitPoints]" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + 
"type": "Sequence[Tuple[str, Union[str, bytes]]]" + } + ], + "resultType": "google.cloud.spanner_admin_database_v1.types.AddSplitPointsResponse", + "shortName": "add_split_points" + }, + "description": "Sample for AddSplitPoints", + "file": "spanner_v1_generated_database_admin_add_split_points_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "spanner_v1_generated_DatabaseAdmin_AddSplitPoints_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "spanner_v1_generated_database_admin_add_split_points_sync.py" + }, { "canonical": true, "clientMethod": { @@ -59,7 +228,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -151,7 +320,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -240,7 +409,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.BackupSchedule", @@ -328,7 +497,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.BackupSchedule", @@ -417,7 +586,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -505,7 +674,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -590,7 +759,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -674,7 +843,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -755,7 +924,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_backup_schedule" @@ -832,7 +1001,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_backup_schedule" @@ -910,7 +1079,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_backup" @@ -987,7 +1156,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_backup" @@ -1065,7 +1234,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "drop_database" @@ -1142,7 +1311,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, 
Union[str, bytes]]]" } ], "shortName": "drop_database" @@ -1220,7 +1389,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.BackupSchedule", @@ -1300,7 +1469,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.BackupSchedule", @@ -1381,7 +1550,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.Backup", @@ -1461,7 +1630,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.Backup", @@ -1542,7 +1711,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse", @@ -1622,7 +1791,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.GetDatabaseDdlResponse", @@ -1703,7 +1872,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.Database", @@ -1783,7 +1952,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.Database", @@ -1864,7 +2033,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -1944,7 +2113,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -2025,7 +2194,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsAsyncPager", @@ -2105,7 +2274,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupOperationsPager", @@ -2186,7 +2355,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesAsyncPager", @@ -2266,7 +2435,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupSchedulesPager", @@ -2347,7 +2516,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsAsyncPager", @@ -2427,7 +2596,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": 
"Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListBackupsPager", @@ -2508,7 +2677,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsAsyncPager", @@ -2588,7 +2757,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseOperationsPager", @@ -2669,7 +2838,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesAsyncPager", @@ -2749,7 +2918,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabaseRolesPager", @@ -2830,7 +2999,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesAsyncPager", @@ -2910,7 +3079,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.services.database_admin.pagers.ListDatabasesPager", @@ -2999,7 +3168,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -3087,7 +3256,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -3168,7 +3337,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -3248,7 +3417,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -3333,7 +3502,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", @@ -3417,7 +3586,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", @@ -3502,7 +3671,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.BackupSchedule", @@ -3586,7 +3755,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.BackupSchedule", @@ -3671,7 +3840,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.Backup", @@ -3755,7 +3924,7 @@ }, { 
"name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_database_v1.types.Backup", @@ -3840,7 +4009,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -3924,7 +4093,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -4009,7 +4178,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -4093,7 +4262,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json index 6d216a11b2..74eaaff2f8 100644 --- a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner-admin-instance", - "version": "3.52.0" + "version": "3.53.0" }, "snippets": [ { @@ -55,7 +55,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -143,7 +143,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -232,7 +232,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -320,7 +320,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -409,7 +409,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -497,7 +497,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -578,7 +578,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_instance_config" @@ -655,7 +655,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_instance_config" @@ -733,7 +733,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_instance_partition" @@ -810,7 +810,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_instance_partition" @@ -888,7 +888,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], 
"shortName": "delete_instance" @@ -965,7 +965,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_instance" @@ -1043,7 +1043,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -1123,7 +1123,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -1204,7 +1204,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.types.InstanceConfig", @@ -1284,7 +1284,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.types.InstanceConfig", @@ -1365,7 +1365,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.types.InstancePartition", @@ -1445,7 +1445,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.types.InstancePartition", @@ -1526,7 +1526,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.types.Instance", @@ -1606,7 +1606,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.types.Instance", @@ -1687,7 +1687,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigOperationsAsyncPager", @@ -1767,7 +1767,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigOperationsPager", @@ -1848,7 +1848,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigsAsyncPager", @@ -1928,7 +1928,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstanceConfigsPager", @@ -2009,7 +2009,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionOperationsAsyncPager", @@ -2089,7 +2089,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionOperationsPager", @@ -2170,7 +2170,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": 
"Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionsAsyncPager", @@ -2250,7 +2250,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancePartitionsPager", @@ -2331,7 +2331,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancesAsyncPager", @@ -2411,7 +2411,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_admin_instance_v1.services.instance_admin.pagers.ListInstancesPager", @@ -2488,7 +2488,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -2564,7 +2564,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -2645,7 +2645,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -2725,7 +2725,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.policy_pb2.Policy", @@ -2810,7 +2810,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", @@ -2894,7 +2894,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse", @@ -2979,7 +2979,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -3063,7 +3063,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -3148,7 +3148,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -3232,7 +3232,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", @@ -3317,7 +3317,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation_async.AsyncOperation", @@ -3401,7 +3401,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.api_core.operation.Operation", diff --git a/samples/generated_samples/snippet_metadata_google.spanner.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.v1.json index 09626918ec..ba20d6b76a 100644 --- 
a/samples/generated_samples/snippet_metadata_google.spanner.v1.json +++ b/samples/generated_samples/snippet_metadata_google.spanner.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-spanner", - "version": "3.52.0" + "version": "3.53.0" }, "snippets": [ { @@ -51,7 +51,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.BatchCreateSessionsResponse", @@ -135,7 +135,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.BatchCreateSessionsResponse", @@ -220,7 +220,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]", @@ -304,7 +304,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "Iterable[google.cloud.spanner_v1.types.BatchWriteResponse]", @@ -389,7 +389,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.Transaction", @@ -473,7 +473,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.Transaction", @@ -566,7 +566,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.CommitResponse", @@ -658,7 +658,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.CommitResponse", @@ -739,7 +739,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.Session", @@ -819,7 +819,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.Session", @@ -900,7 +900,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_session" @@ -977,7 +977,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "delete_session" @@ -1051,7 +1051,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.ExecuteBatchDmlResponse", @@ -1127,7 +1127,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.ExecuteBatchDmlResponse", @@ -1204,7 +1204,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.ResultSet", @@ -1280,7 +1280,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.ResultSet", @@ -1357,7 +1357,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, 
str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "Iterable[google.cloud.spanner_v1.types.PartialResultSet]", @@ -1433,7 +1433,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "Iterable[google.cloud.spanner_v1.types.PartialResultSet]", @@ -1514,7 +1514,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.Session", @@ -1594,7 +1594,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.Session", @@ -1675,7 +1675,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.services.spanner.pagers.ListSessionsAsyncPager", @@ -1755,7 +1755,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.services.spanner.pagers.ListSessionsPager", @@ -1832,7 +1832,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.PartitionResponse", @@ -1908,7 +1908,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.PartitionResponse", @@ -1985,7 +1985,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.PartitionResponse", @@ -2061,7 +2061,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.PartitionResponse", @@ -2138,7 +2138,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.ResultSet", @@ -2214,7 +2214,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "google.cloud.spanner_v1.types.ResultSet", @@ -2299,7 +2299,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "rollback" @@ -2380,7 +2380,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "shortName": "rollback" @@ -2454,7 +2454,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "Iterable[google.cloud.spanner_v1.types.PartialResultSet]", @@ -2530,7 +2530,7 @@ }, { "name": "metadata", - "type": "Sequence[Tuple[str, str]" + "type": "Sequence[Tuple[str, Union[str, bytes]]]" } ], "resultType": "Iterable[google.cloud.spanner_v1.types.PartialResultSet]", diff --git a/samples/generated_samples/spanner_v1_generated_database_admin_add_split_points_async.py b/samples/generated_samples/spanner_v1_generated_database_admin_add_split_points_async.py new file mode 100644 index 0000000000..9ecd231125 --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_database_admin_add_split_points_async.py @@ -0,0 +1,52 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddSplitPoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner-admin-database + + +# [START spanner_v1_generated_DatabaseAdmin_AddSplitPoints_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_admin_database_v1 + + +async def sample_add_split_points(): + # Create a client + client = spanner_admin_database_v1.DatabaseAdminAsyncClient() + + # Initialize request argument(s) + request = spanner_admin_database_v1.AddSplitPointsRequest( + database="database_value", + ) + + # Make the request + response = await client.add_split_points(request=request) + + # Handle the response + print(response) + +# [END spanner_v1_generated_DatabaseAdmin_AddSplitPoints_async] diff --git a/samples/generated_samples/spanner_v1_generated_database_admin_add_split_points_sync.py b/samples/generated_samples/spanner_v1_generated_database_admin_add_split_points_sync.py new file mode 100644 index 0000000000..43c01f8c9f --- /dev/null +++ b/samples/generated_samples/spanner_v1_generated_database_admin_add_split_points_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2024 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for AddSplitPoints +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-spanner-admin-database + + +# [START spanner_v1_generated_DatabaseAdmin_AddSplitPoints_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import spanner_admin_database_v1 + + +def sample_add_split_points(): + # Create a client + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Initialize request argument(s) + request = spanner_admin_database_v1.AddSplitPointsRequest( + database="database_value", + ) + + # Make the request + response = client.add_split_points(request=request) + + # Handle the response + print(response) + +# [END spanner_v1_generated_DatabaseAdmin_AddSplitPoints_sync] diff --git a/scripts/fixup_spanner_admin_database_v1_keywords.py b/scripts/fixup_spanner_admin_database_v1_keywords.py index 0c7fea2c42..bb10888f92 100644 --- a/scripts/fixup_spanner_admin_database_v1_keywords.py +++ b/scripts/fixup_spanner_admin_database_v1_keywords.py @@ -39,6 +39,7 @@ def partition( class spanner_admin_databaseCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'add_split_points': ('database', 'split_points', 'initiator', ), 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', 'encryption_config', ), 'create_backup': ('parent', 'backup_id', 'backup', 'encryption_config', ), 'create_backup_schedule': ('parent', 'backup_schedule_id', 'backup_schedule', ), diff --git a/scripts/fixup_spanner_v1_keywords.py b/scripts/fixup_spanner_v1_keywords.py index f886864774..91d94cbef8 100644 --- a/scripts/fixup_spanner_v1_keywords.py +++ b/scripts/fixup_spanner_v1_keywords.py @@ -45,9 +45,9 @@ class spannerCallTransformer(cst.CSTTransformer): 'commit': ('session', 'transaction_id', 'single_use_transaction', 'mutations', 'return_commit_stats', 'max_commit_delay', 'request_options', 'precommit_token', ), 'create_session': ('database', 'session', ), 'delete_session': ('name', ), - 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', 'request_options', ), - 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'directed_read_options', 'data_boost_enabled', ), - 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'directed_read_options', 'data_boost_enabled', ), + 'execute_batch_dml': ('session', 'transaction', 'statements', 'seqno', 'request_options', 'last_statements', ), + 'execute_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'directed_read_options', 'data_boost_enabled', 'last_statement', ), + 'execute_streaming_sql': ('session', 'sql', 'transaction', 'params', 'param_types', 'resume_token', 'query_mode', 'partition_token', 'seqno', 'query_options', 'request_options', 'directed_read_options', 'data_boost_enabled', 'last_statement', ), 'get_session': ('name', ), 'list_sessions': ('database', 'page_size', 'page_token', 'filter', ), 'partition_query': ('session', 'sql', 'transaction', 'params', 'param_types', 'partition_options', ), diff --git a/setup.py b/setup.py index 619607b794..a32883075b 100644 --- a/setup.py +++ b/setup.py @@ -36,13 +36,13 @@ release_status = "Development Status :: 5 - Production/Stable" dependencies = [ - "google-api-core[grpc] >= 1.34.0, 
<3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "google-cloud-core >= 1.4.4, < 3.0dev", - "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", - "proto-plus >= 1.22.0, <2.0.0dev", + "google-api-core[grpc] >= 1.34.0, <3.0.0,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "google-cloud-core >= 1.4.4, < 3.0.0", + "grpc-google-iam-v1 >= 0.12.4, <1.0.0", + "proto-plus >= 1.22.0, <2.0.0", "sqlparse >= 0.4.4", - "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", - "protobuf>=3.20.2,<6.0.0dev,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "proto-plus >= 1.22.2, <2.0.0; python_version>='3.11'", + "protobuf>=3.20.2,<7.0.0,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", "grpc-interceptor >= 0.15.4", ] extras = { @@ -50,7 +50,9 @@ "opentelemetry-api >= 1.22.0", "opentelemetry-sdk >= 1.22.0", "opentelemetry-semantic-conventions >= 0.43b0", + "opentelemetry-resourcedetector-gcp >= 1.8.0a0", "google-cloud-monitoring >= 2.16.0", + "mmh3 >= 4.1.0", ], "libcst": "libcst >= 0.2.5", } diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt index 5369861daf..ad3f0fa58e 100644 --- a/testing/constraints-3.10.txt +++ b/testing/constraints-3.10.txt @@ -5,4 +5,3 @@ google-api-core proto-plus protobuf grpc-google-iam-v1 -google-cloud-monitoring diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt index 28bc2bd36c..ad3f0fa58e 100644 --- a/testing/constraints-3.11.txt +++ b/testing/constraints-3.11.txt @@ -1,7 +1,6 @@ # -*- coding: utf-8 -*- # This constraints file is required for unit tests. # List all library dependencies and extras in this file. -google-cloud-monitoring google-api-core proto-plus protobuf diff --git a/testing/constraints-3.12.txt b/testing/constraints-3.12.txt index 5369861daf..ad3f0fa58e 100644 --- a/testing/constraints-3.12.txt +++ b/testing/constraints-3.12.txt @@ -5,4 +5,3 @@ google-api-core proto-plus protobuf grpc-google-iam-v1 -google-cloud-monitoring diff --git a/testing/constraints-3.13.txt b/testing/constraints-3.13.txt index 5369861daf..ad3f0fa58e 100644 --- a/testing/constraints-3.13.txt +++ b/testing/constraints-3.13.txt @@ -5,4 +5,3 @@ google-api-core proto-plus protobuf grpc-google-iam-v1 -google-cloud-monitoring diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt index af33b0c8e8..58482dcd03 100644 --- a/testing/constraints-3.7.txt +++ b/testing/constraints-3.7.txt @@ -17,3 +17,4 @@ protobuf==3.20.2 deprecated==1.2.14 grpc-interceptor==0.15.4 google-cloud-monitoring==2.16.0 +mmh3==4.1.0 diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt index 5369861daf..ad3f0fa58e 100644 --- a/testing/constraints-3.8.txt +++ b/testing/constraints-3.8.txt @@ -5,4 +5,3 @@ google-api-core proto-plus protobuf grpc-google-iam-v1 -google-cloud-monitoring diff --git a/testing/constraints-3.9.txt b/testing/constraints-3.9.txt index 5369861daf..ad3f0fa58e 100644 --- a/testing/constraints-3.9.txt +++ b/testing/constraints-3.9.txt @@ -5,4 +5,3 @@ google-api-core proto-plus protobuf grpc-google-iam-v1 -google-cloud-monitoring diff --git a/tests/mockserver_tests/test_basics.py b/tests/mockserver_tests/test_basics.py index d34065a6ff..3706552d31 100644 --- a/tests/mockserver_tests/test_basics.py +++ b/tests/mockserver_tests/test_basics.py @@ -20,7 +20,10 @@ ExecuteSqlRequest, BeginTransactionRequest, TransactionOptions, + ExecuteBatchDmlRequest, + TypeCode, ) +from google.cloud.spanner_v1.transaction import Transaction
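The updated keyword maps above register the new last_statement / last_statements request fields on execute_sql, execute_streaming_sql, and execute_batch_dml, and the test_basics.py additions below exercise them through the Transaction helpers. As a rough sketch of the call shape these tests rely on (the statement text matches the tests; the database object is assumed to be an initialized spanner_v1 Database):

    def update_and_finish(transaction):
        # Flagging the final statement of a read/write transaction; Spanner
        # can use this hint to optimize the commit that follows.
        return transaction.execute_update(
            "update my_table set my_col=1 where id=2",
            last_statement=True,
        )

    database.run_in_transaction(update_and_finish)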
from google.cloud.spanner_v1.testing.mock_spanner import SpannerServicer from tests.mockserver_tests.mock_server_test_base import ( @@ -29,6 +32,7 @@ add_update_count, add_error, unavailable_status, + add_single_result, ) @@ -107,3 +111,56 @@ def test_execute_streaming_sql_unavailable(self): # The ExecuteStreamingSql call should be retried. self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) + + def test_last_statement_update(self): + sql = "update my_table set my_col=1 where id=2" + add_update_count(sql, 1) + self.database.run_in_transaction( + lambda transaction: transaction.execute_update(sql, last_statement=True) + ) + requests = list( + filter( + lambda msg: isinstance(msg, ExecuteSqlRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(requests), msg=requests) + self.assertTrue(requests[0].last_statement, requests[0]) + + def test_last_statement_batch_update(self): + sql = "update my_table set my_col=1 where id=2" + add_update_count(sql, 1) + self.database.run_in_transaction( + lambda transaction: transaction.batch_update( + [sql, sql], last_statement=True + ) + ) + requests = list( + filter( + lambda msg: isinstance(msg, ExecuteBatchDmlRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(requests), msg=requests) + self.assertTrue(requests[0].last_statements, requests[0]) + + def test_last_statement_query(self): + sql = "insert into my_table (value) values ('One') then return id" + add_single_result(sql, "c", TypeCode.INT64, [("1",)]) + self.database.run_in_transaction( + lambda transaction: _execute_query(transaction, sql) + ) + requests = list( + filter( + lambda msg: isinstance(msg, ExecuteSqlRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(requests), msg=requests) + self.assertTrue(requests[0].last_statement, requests[0]) + + +def _execute_query(transaction: Transaction, sql: str): + rows = transaction.execute_sql(sql, last_statement=True) + for _ in rows: + pass diff --git a/tests/mockserver_tests/test_dbapi_autocommit.py b/tests/mockserver_tests/test_dbapi_autocommit.py new file mode 100644 index 0000000000..7f0e3e432f --- /dev/null +++ b/tests/mockserver_tests/test_dbapi_autocommit.py @@ -0,0 +1,127 @@ +# Copyright 2025 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
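The new test_dbapi_autocommit.py module introduced here checks the DB-API layer in autocommit mode against the mock server: plain queries should be sent as single-use read-only transactions, while DML should execute with last_statement(s) set and be committed immediately. A minimal usage sketch, assuming instance and database objects obtained the way these tests build them:

    from google.cloud.spanner_dbapi import Connection

    connection = Connection(instance, database)  # instance/database assumed to exist
    connection.autocommit = True
    with connection.cursor() as cursor:
        # Runs as its own read/write transaction and commits immediately;
        # no explicit connection.commit() is needed in autocommit mode.
        cursor.execute("insert into singers (id, name) values (1, 'Some Singer')")
        print(cursor.rowcount)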
+ +from google.cloud.spanner_dbapi import Connection +from google.cloud.spanner_v1 import ( + ExecuteSqlRequest, + TypeCode, + CommitRequest, + ExecuteBatchDmlRequest, +) +from tests.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_single_result, + add_update_count, +) + + +class TestDbapiAutoCommit(MockServerTestBase): + @classmethod + def setup_class(cls): + super().setup_class() + add_single_result( + "select name from singers", "name", TypeCode.STRING, [("Some Singer",)] + ) + add_update_count("insert into singers (id, name) values (1, 'Some Singer')", 1) + + def test_select_autocommit(self): + connection = Connection(self.instance, self.database) + connection.autocommit = True + with connection.cursor() as cursor: + cursor.execute("select name from singers") + result_list = cursor.fetchall() + for _ in result_list: + pass + requests = list( + filter( + lambda msg: isinstance(msg, ExecuteSqlRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(requests)) + self.assertFalse(requests[0].last_statement, requests[0]) + self.assertIsNotNone(requests[0].transaction, requests[0]) + self.assertIsNotNone(requests[0].transaction.single_use, requests[0]) + self.assertTrue(requests[0].transaction.single_use.read_only, requests[0]) + + def test_dml_autocommit(self): + connection = Connection(self.instance, self.database) + connection.autocommit = True + with connection.cursor() as cursor: + cursor.execute("insert into singers (id, name) values (1, 'Some Singer')") + self.assertEqual(1, cursor.rowcount) + requests = list( + filter( + lambda msg: isinstance(msg, ExecuteSqlRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(requests)) + self.assertTrue(requests[0].last_statement, requests[0]) + commit_requests = list( + filter( + lambda msg: isinstance(msg, CommitRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(commit_requests)) + + def test_executemany_autocommit(self): + connection = Connection(self.instance, self.database) + connection.autocommit = True + with connection.cursor() as cursor: + cursor.executemany( + "insert into singers (id, name) values (1, 'Some Singer')", [(), ()] + ) + self.assertEqual(2, cursor.rowcount) + requests = list( + filter( + lambda msg: isinstance(msg, ExecuteBatchDmlRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(requests)) + self.assertTrue(requests[0].last_statements, requests[0]) + commit_requests = list( + filter( + lambda msg: isinstance(msg, CommitRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(commit_requests)) + + def test_batch_dml_autocommit(self): + connection = Connection(self.instance, self.database) + connection.autocommit = True + with connection.cursor() as cursor: + cursor.execute("start batch dml") + cursor.execute("insert into singers (id, name) values (1, 'Some Singer')") + cursor.execute("insert into singers (id, name) values (1, 'Some Singer')") + cursor.execute("run batch") + self.assertEqual(2, cursor.rowcount) + requests = list( + filter( + lambda msg: isinstance(msg, ExecuteBatchDmlRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(requests)) + self.assertTrue(requests[0].last_statements, requests[0]) + commit_requests = list( + filter( + lambda msg: isinstance(msg, CommitRequest), + self.spanner_service.requests, + ) + ) + self.assertEqual(1, len(commit_requests)) diff --git a/tests/mockserver_tests/test_tags.py b/tests/mockserver_tests/test_tags.py index 
c84d69b7bd..f44a9fb9a9 100644 --- a/tests/mockserver_tests/test_tags.py +++ b/tests/mockserver_tests/test_tags.py @@ -181,10 +181,16 @@ def test_request_tag_is_cleared(self): # This query will not have a request tag. cursor.execute("select name from singers") requests = self.spanner_service.requests - self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) - self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) - self.assertEqual("my_tag", requests[1].request_options.request_tag) - self.assertEqual("", requests[2].request_options.request_tag) + + # Filter for ExecuteSqlRequest calls + sql_requests = [ + request for request in requests if isinstance(request, ExecuteSqlRequest) + ] + + self.assertTrue(isinstance(sql_requests[0], ExecuteSqlRequest)) + self.assertTrue(isinstance(sql_requests[1], ExecuteSqlRequest)) + self.assertEqual("my_tag", sql_requests[0].request_options.request_tag) + self.assertEqual("", sql_requests[1].request_options.request_tag) def _execute_and_verify_select_singers( self, connection: Connection, request_tag: str = "", transaction_tag: str = "" diff --git a/tests/system/test_database_api.py b/tests/system/test_database_api.py index c8b3c543fc..57ce49c8a2 100644 --- a/tests/system/test_database_api.py +++ b/tests/system/test_database_api.py @@ -294,7 +294,8 @@ def test_iam_policy( new_policy = temp_db.get_iam_policy(3) assert new_policy.version == 3 - assert new_policy.bindings == [new_binding] + assert len(new_policy.bindings) == 1 + assert new_policy.bindings[0] == new_binding def test_table_not_found(shared_instance): diff --git a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py index 5e14c8b66d..8c49a448c7 100644 --- a/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py +++ b/tests/unit/gapic/spanner_admin_database_v1/test_database_admin.py @@ -83,12 +83,21 @@ from google.protobuf import duration_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import struct_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore from google.type import expr_pb2 # type: ignore import google.auth +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + async def mock_async_gen(data, chunk_size=1): for i in range(0, len(data)): # pragma: NO COVER chunk = data[i : i + chunk_size] @@ -343,83 +352,46 @@ def test__get_universe_domain(): @pytest.mark.parametrize( - "client_class,transport_class,transport_name", + "error_code,cred_info_json,show_cred_info", [ - (DatabaseAdminClient, transports.DatabaseAdminGrpcTransport, "grpc"), - (DatabaseAdminClient, transports.DatabaseAdminRestTransport, "rest"), + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), ], ) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) - ) - assert client._validate_universe_domain() == True - - # Test the case when universe is already validated.
- assert client._validate_universe_domain() == True - - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = DatabaseAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - transport = transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True - # TODO: This is needed to cater for older versions of google-auth - # Make this test unconditional once the minimum supported version of - # google-auth becomes 2.23.0 or higher. - google_auth_major, google_auth_minor = [ - int(part) for part in google.auth.__version__.split(".")[0:2] - ] - if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): - credentials = ga_credentials.AnonymousCredentials() - credentials._universe_domain = "foo.com" - # Test the case when there is a universe mismatch from the credentials. - client = client_class(transport=transport_class(credentials=credentials)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = DatabaseAdminClient(credentials=cred) + client._transport._credentials = cred - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. - api_core_major, api_core_minor = [ - int(part) for part in api_core_version.__version__.split(".")[0:2] - ] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class( - client_options={"universe_domain": "bar.com"}, - transport=transport_class( - credentials=ga_credentials.AnonymousCredentials(), - ), - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). 
If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code - # Test that ValueError is raised if universe_domain is provided via client options and credentials is None - with pytest.raises(ValueError): - client._compare_universes("foo.bar", None) + client._add_cred_info_for_auth_errors(error) + assert error.details == [] @pytest.mark.parametrize( @@ -9025,11 +8997,11 @@ async def test_list_database_roles_async_pages(): @pytest.mark.parametrize( "request_type", [ - gsad_backup_schedule.CreateBackupScheduleRequest, + spanner_database_admin.AddSplitPointsRequest, dict, ], ) -def test_create_backup_schedule(request_type, transport: str = "grpc"): +def test_add_split_points(request_type, transport: str = "grpc"): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9040,27 +9012,22 @@ def test_create_backup_schedule(request_type, transport: str = "grpc"): request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup_schedule), "__call__" - ) as call: + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = gsad_backup_schedule.BackupSchedule( - name="name_value", - ) - response = client.create_backup_schedule(request) + call.return_value = spanner_database_admin.AddSplitPointsResponse() + response = client.add_split_points(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request = gsad_backup_schedule.CreateBackupScheduleRequest() + request = spanner_database_admin.AddSplitPointsRequest() assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, gsad_backup_schedule.BackupSchedule) - assert response.name == "name_value" + assert isinstance(response, spanner_database_admin.AddSplitPointsResponse) -def test_create_backup_schedule_non_empty_request_with_auto_populated_field(): +def test_add_split_points_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. client = DatabaseAdminClient( @@ -9071,28 +9038,26 @@ def test_create_backup_schedule_non_empty_request_with_auto_populated_field(): # Populate all string fields in the request which are not UUID4 # since we want to check that UUID4 are populated automatically # if they meet the requirements of AIP 4235. - request = gsad_backup_schedule.CreateBackupScheduleRequest( - parent="parent_value", - backup_schedule_id="backup_schedule_id_value", + request = spanner_database_admin.AddSplitPointsRequest( + database="database_value", + initiator="initiator_value", ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup_schedule), "__call__" - ) as call: + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: call.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. 
) - client.create_backup_schedule(request=request) + client.add_split_points(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == gsad_backup_schedule.CreateBackupScheduleRequest( - parent="parent_value", - backup_schedule_id="backup_schedule_id_value", + assert args[0] == spanner_database_admin.AddSplitPointsRequest( + database="database_value", + initiator="initiator_value", ) -def test_create_backup_schedule_use_cached_wrapped_rpc(): +def test_add_split_points_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -9106,10 +9071,7 @@ def test_create_backup_schedule_use_cached_wrapped_rpc(): wrapper_fn.reset_mock() # Ensure method has been cached - assert ( - client._transport.create_backup_schedule - in client._transport._wrapped_methods - ) + assert client._transport.add_split_points in client._transport._wrapped_methods # Replace cached wrapped function with mock mock_rpc = mock.Mock() @@ -9117,15 +9079,15 @@ def test_create_backup_schedule_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.create_backup_schedule + client._transport.add_split_points ] = mock_rpc request = {} - client.create_backup_schedule(request) + client.add_split_points(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.create_backup_schedule(request) + client.add_split_points(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -9133,7 +9095,7 @@ def test_create_backup_schedule_use_cached_wrapped_rpc(): @pytest.mark.asyncio -async def test_create_backup_schedule_async_use_cached_wrapped_rpc( +async def test_add_split_points_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", ): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, @@ -9150,7 +9112,7 @@ async def test_create_backup_schedule_async_use_cached_wrapped_rpc( # Ensure method has been cached assert ( - client._client._transport.create_backup_schedule + client._client._transport.add_split_points in client._client._transport._wrapped_methods ) @@ -9158,16 +9120,16 @@ async def test_create_backup_schedule_async_use_cached_wrapped_rpc( mock_rpc = mock.AsyncMock() mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ - client._client._transport.create_backup_schedule + client._client._transport.add_split_points ] = mock_rpc request = {} - await client.create_backup_schedule(request) + await client.add_split_points(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - await client.create_backup_schedule(request) + await client.add_split_points(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -9175,9 +9137,9 @@ async def test_create_backup_schedule_async_use_cached_wrapped_rpc( @pytest.mark.asyncio -async def test_create_backup_schedule_async( +async def test_add_split_points_async( transport: str = "grpc_asyncio", - request_type=gsad_backup_schedule.CreateBackupScheduleRequest, + request_type=spanner_database_admin.AddSplitPointsRequest, ): client = DatabaseAdminAsyncClient( credentials=async_anonymous_credentials(), @@ -9189,50 +9151,43 @@ async def test_create_backup_schedule_async( request = request_type() # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup_schedule), "__call__" - ) as call: + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gsad_backup_schedule.BackupSchedule( - name="name_value", - ) + spanner_database_admin.AddSplitPointsResponse() ) - response = await client.create_backup_schedule(request) + response = await client.add_split_points(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request = gsad_backup_schedule.CreateBackupScheduleRequest() + request = spanner_database_admin.AddSplitPointsRequest() assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, gsad_backup_schedule.BackupSchedule) - assert response.name == "name_value" + assert isinstance(response, spanner_database_admin.AddSplitPointsResponse) @pytest.mark.asyncio -async def test_create_backup_schedule_async_from_dict(): - await test_create_backup_schedule_async(request_type=dict) +async def test_add_split_points_async_from_dict(): + await test_add_split_points_async(request_type=dict) -def test_create_backup_schedule_field_headers(): +def test_add_split_points_field_headers(): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = gsad_backup_schedule.CreateBackupScheduleRequest() + request = spanner_database_admin.AddSplitPointsRequest() - request.parent = "parent_value" + request.database = "database_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup_schedule), "__call__" - ) as call: - call.return_value = gsad_backup_schedule.BackupSchedule() - client.create_backup_schedule(request) + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: + call.return_value = spanner_database_admin.AddSplitPointsResponse() + client.add_split_points(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 @@ -9243,30 +9198,28 @@ def test_create_backup_schedule_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "database=database_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_create_backup_schedule_field_headers_async(): +async def test_add_split_points_field_headers_async(): client = DatabaseAdminAsyncClient( credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = gsad_backup_schedule.CreateBackupScheduleRequest() + request = spanner_database_admin.AddSplitPointsRequest() - request.parent = "parent_value" + request.database = "database_value" # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup_schedule), "__call__" - ) as call: + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gsad_backup_schedule.BackupSchedule() + spanner_database_admin.AddSplitPointsResponse() ) - await client.create_backup_schedule(request) + await client.add_split_points(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -9277,45 +9230,39 @@ async def test_create_backup_schedule_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "parent=parent_value", + "database=database_value", ) in kw["metadata"] -def test_create_backup_schedule_flattened(): +def test_add_split_points_flattened(): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup_schedule), "__call__" - ) as call: + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = gsad_backup_schedule.BackupSchedule() + call.return_value = spanner_database_admin.AddSplitPointsResponse() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.create_backup_schedule( - parent="parent_value", - backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), - backup_schedule_id="backup_schedule_id_value", + client.add_split_points( + database="database_value", + split_points=[spanner_database_admin.SplitPoints(table="table_value")], ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_schedule - mock_val = gsad_backup_schedule.BackupSchedule(name="name_value") + arg = args[0].database + mock_val = "database_value" assert arg == mock_val - arg = args[0].backup_schedule_id - mock_val = "backup_schedule_id_value" + arg = args[0].split_points + mock_val = [spanner_database_admin.SplitPoints(table="table_value")] assert arg == mock_val -def test_create_backup_schedule_flattened_error(): +def test_add_split_points_flattened_error(): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) @@ -9323,55 +9270,48 @@ def test_create_backup_schedule_flattened_error(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - client.create_backup_schedule( - gsad_backup_schedule.CreateBackupScheduleRequest(), - parent="parent_value", - backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), - backup_schedule_id="backup_schedule_id_value", + client.add_split_points( + spanner_database_admin.AddSplitPointsRequest(), + database="database_value", + split_points=[spanner_database_admin.SplitPoints(table="table_value")], ) @pytest.mark.asyncio -async def test_create_backup_schedule_flattened_async(): +async def test_add_split_points_flattened_async(): client = DatabaseAdminAsyncClient( credentials=async_anonymous_credentials(), ) # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup_schedule), "__call__" - ) as call: + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: # Designate an appropriate return value for the call. - call.return_value = gsad_backup_schedule.BackupSchedule() + call.return_value = spanner_database_admin.AddSplitPointsResponse() call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - gsad_backup_schedule.BackupSchedule() + spanner_database_admin.AddSplitPointsResponse() ) # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - response = await client.create_backup_schedule( - parent="parent_value", - backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), - backup_schedule_id="backup_schedule_id_value", + response = await client.add_split_points( + database="database_value", + split_points=[spanner_database_admin.SplitPoints(table="table_value")], ) # Establish that the underlying call was made with the expected # request object values. 
assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = "parent_value" - assert arg == mock_val - arg = args[0].backup_schedule - mock_val = gsad_backup_schedule.BackupSchedule(name="name_value") + arg = args[0].database + mock_val = "database_value" assert arg == mock_val - arg = args[0].backup_schedule_id - mock_val = "backup_schedule_id_value" + arg = args[0].split_points + mock_val = [spanner_database_admin.SplitPoints(table="table_value")] assert arg == mock_val @pytest.mark.asyncio -async def test_create_backup_schedule_flattened_error_async(): +async def test_add_split_points_flattened_error_async(): client = DatabaseAdminAsyncClient( credentials=async_anonymous_credentials(), ) @@ -9379,22 +9319,21 @@ async def test_create_backup_schedule_flattened_error_async(): # Attempting to call a method with both a request object and flattened # fields is an error. with pytest.raises(ValueError): - await client.create_backup_schedule( - gsad_backup_schedule.CreateBackupScheduleRequest(), - parent="parent_value", - backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), - backup_schedule_id="backup_schedule_id_value", + await client.add_split_points( + spanner_database_admin.AddSplitPointsRequest(), + database="database_value", + split_points=[spanner_database_admin.SplitPoints(table="table_value")], ) @pytest.mark.parametrize( "request_type", [ - backup_schedule.GetBackupScheduleRequest, + gsad_backup_schedule.CreateBackupScheduleRequest, dict, ], ) -def test_get_backup_schedule(request_type, transport: str = "grpc"): +def test_create_backup_schedule(request_type, transport: str = "grpc"): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, @@ -9406,26 +9345,26 @@ def test_get_backup_schedule(request_type, transport: str = "grpc"): # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.get_backup_schedule), "__call__" + type(client.transport.create_backup_schedule), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = backup_schedule.BackupSchedule( + call.return_value = gsad_backup_schedule.BackupSchedule( name="name_value", ) - response = client.get_backup_schedule(request) + response = client.create_backup_schedule(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] - request = backup_schedule.GetBackupScheduleRequest() + request = gsad_backup_schedule.CreateBackupScheduleRequest() assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, backup_schedule.BackupSchedule) + assert isinstance(response, gsad_backup_schedule.BackupSchedule) assert response.name == "name_value" -def test_get_backup_schedule_non_empty_request_with_auto_populated_field(): +def test_create_backup_schedule_non_empty_request_with_auto_populated_field(): # This test is a coverage failsafe to make sure that UUID4 fields are # automatically populated, according to AIP-4235, with non-empty requests. client = DatabaseAdminClient( @@ -9436,26 +9375,28 @@ def test_get_backup_schedule_non_empty_request_with_auto_populated_field(): # Populate all string fields in the request which are not UUID4 # since we want to check that UUID4 are populated automatically # if they meet the requirements of AIP 4235. 
- request = backup_schedule.GetBackupScheduleRequest( - name="name_value", + request = gsad_backup_schedule.CreateBackupScheduleRequest( + parent="parent_value", + backup_schedule_id="backup_schedule_id_value", ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.get_backup_schedule), "__call__" + type(client.transport.create_backup_schedule), "__call__" ) as call: call.return_value.name = ( "foo" # operation_request.operation in compute client(s) expect a string. ) - client.get_backup_schedule(request=request) + client.create_backup_schedule(request=request) call.assert_called() _, args, _ = call.mock_calls[0] - assert args[0] == backup_schedule.GetBackupScheduleRequest( - name="name_value", + assert args[0] == gsad_backup_schedule.CreateBackupScheduleRequest( + parent="parent_value", + backup_schedule_id="backup_schedule_id_value", ) -def test_get_backup_schedule_use_cached_wrapped_rpc(): +def test_create_backup_schedule_use_cached_wrapped_rpc(): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, # instead of constructing them on each call with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: @@ -9470,7 +9411,8 @@ def test_get_backup_schedule_use_cached_wrapped_rpc(): # Ensure method has been cached assert ( - client._transport.get_backup_schedule in client._transport._wrapped_methods + client._transport.create_backup_schedule + in client._transport._wrapped_methods ) # Replace cached wrapped function with mock @@ -9479,15 +9421,15 @@ def test_get_backup_schedule_use_cached_wrapped_rpc(): "foo" # operation_request.operation in compute client(s) expect a string. ) client._transport._wrapped_methods[ - client._transport.get_backup_schedule + client._transport.create_backup_schedule ] = mock_rpc request = {} - client.get_backup_schedule(request) + client.create_backup_schedule(request) # Establish that the underlying gRPC stub method was called. assert mock_rpc.call_count == 1 - client.get_backup_schedule(request) + client.create_backup_schedule(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -9495,7 +9437,7 @@ def test_get_backup_schedule_use_cached_wrapped_rpc(): @pytest.mark.asyncio -async def test_get_backup_schedule_async_use_cached_wrapped_rpc( +async def test_create_backup_schedule_async_use_cached_wrapped_rpc( transport: str = "grpc_asyncio", ): # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, @@ -9512,7 +9454,7 @@ async def test_get_backup_schedule_async_use_cached_wrapped_rpc( # Ensure method has been cached assert ( - client._client._transport.get_backup_schedule + client._client._transport.create_backup_schedule in client._client._transport._wrapped_methods ) @@ -9520,16 +9462,16 @@ async def test_get_backup_schedule_async_use_cached_wrapped_rpc( mock_rpc = mock.AsyncMock() mock_rpc.return_value = mock.Mock() client._client._transport._wrapped_methods[ - client._client._transport.get_backup_schedule + client._client._transport.create_backup_schedule ] = mock_rpc request = {} - await client.get_backup_schedule(request) + await client.create_backup_schedule(request) # Establish that the underlying gRPC stub method was called. 
assert mock_rpc.call_count == 1 - await client.get_backup_schedule(request) + await client.create_backup_schedule(request) # Establish that a new wrapper was not created for this call assert wrapper_fn.call_count == 0 @@ -9537,9 +9479,9 @@ async def test_get_backup_schedule_async_use_cached_wrapped_rpc( @pytest.mark.asyncio -async def test_get_backup_schedule_async( +async def test_create_backup_schedule_async( transport: str = "grpc_asyncio", - request_type=backup_schedule.GetBackupScheduleRequest, + request_type=gsad_backup_schedule.CreateBackupScheduleRequest, ): client = DatabaseAdminAsyncClient( credentials=async_anonymous_credentials(), @@ -9552,49 +9494,49 @@ async def test_get_backup_schedule_async( # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.get_backup_schedule), "__call__" + type(client.transport.create_backup_schedule), "__call__" ) as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - backup_schedule.BackupSchedule( + gsad_backup_schedule.BackupSchedule( name="name_value", ) ) - response = await client.get_backup_schedule(request) + response = await client.create_backup_schedule(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] - request = backup_schedule.GetBackupScheduleRequest() + request = gsad_backup_schedule.CreateBackupScheduleRequest() assert args[0] == request # Establish that the response is the type that we expect. - assert isinstance(response, backup_schedule.BackupSchedule) + assert isinstance(response, gsad_backup_schedule.BackupSchedule) assert response.name == "name_value" @pytest.mark.asyncio -async def test_get_backup_schedule_async_from_dict(): - await test_get_backup_schedule_async(request_type=dict) +async def test_create_backup_schedule_async_from_dict(): + await test_create_backup_schedule_async(request_type=dict) -def test_get_backup_schedule_field_headers(): +def test_create_backup_schedule_field_headers(): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. - request = backup_schedule.GetBackupScheduleRequest() + request = gsad_backup_schedule.CreateBackupScheduleRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.get_backup_schedule), "__call__" + type(client.transport.create_backup_schedule), "__call__" ) as call: - call.return_value = backup_schedule.BackupSchedule() - client.get_backup_schedule(request) + call.return_value = gsad_backup_schedule.BackupSchedule() + client.create_backup_schedule(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 @@ -9605,30 +9547,30 @@ def test_get_backup_schedule_field_headers(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] @pytest.mark.asyncio -async def test_get_backup_schedule_field_headers_async(): +async def test_create_backup_schedule_field_headers_async(): client = DatabaseAdminAsyncClient( credentials=async_anonymous_credentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. 
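# --- Illustrative aside (not from the generated diff): the field-header tests
# assert that the routing field taken from the request URI is mirrored into an
# "x-goog-request-params" metadata entry. A hand-rolled sketch of how such an
# entry is formed (the real clients build it via gapic routing helpers):
def routing_header(**fields):
    # {"parent": "parent_value"} -> ("x-goog-request-params", "parent=parent_value")
    return ("x-goog-request-params", "&".join(f"{k}={v}" for k, v in sorted(fields.items())))

assert routing_header(parent="parent_value") == (
    "x-goog-request-params",
    "parent=parent_value",
)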
- request = backup_schedule.GetBackupScheduleRequest() + request = gsad_backup_schedule.CreateBackupScheduleRequest() - request.name = "name_value" + request.parent = "parent_value" # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.get_backup_schedule), "__call__" + type(client.transport.create_backup_schedule), "__call__" ) as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - backup_schedule.BackupSchedule() + gsad_backup_schedule.BackupSchedule() ) - await client.get_backup_schedule(request) + await client.create_backup_schedule(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) @@ -9639,25 +9581,387 @@ async def test_get_backup_schedule_field_headers_async(): _, _, kw = call.mock_calls[0] assert ( "x-goog-request-params", - "name=name_value", + "parent=parent_value", ) in kw["metadata"] -def test_get_backup_schedule_flattened(): +def test_create_backup_schedule_flattened(): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( - type(client.transport.get_backup_schedule), "__call__" + type(client.transport.create_backup_schedule), "__call__" ) as call: # Designate an appropriate return value for the call. - call.return_value = backup_schedule.BackupSchedule() + call.return_value = gsad_backup_schedule.BackupSchedule() # Call the method with a truthy value for each flattened field, # using the keyword arguments to the method. - client.get_backup_schedule( - name="name_value", + client.create_backup_schedule( + parent="parent_value", + backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), + backup_schedule_id="backup_schedule_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_schedule + mock_val = gsad_backup_schedule.BackupSchedule(name="name_value") + assert arg == mock_val + arg = args[0].backup_schedule_id + mock_val = "backup_schedule_id_value" + assert arg == mock_val + + +def test_create_backup_schedule_flattened_error(): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_backup_schedule( + gsad_backup_schedule.CreateBackupScheduleRequest(), + parent="parent_value", + backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), + backup_schedule_id="backup_schedule_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_backup_schedule_flattened_async(): + client = DatabaseAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_backup_schedule), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = gsad_backup_schedule.BackupSchedule() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + gsad_backup_schedule.BackupSchedule() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
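# --- Illustrative aside (not from the generated diff): the flattened-call and
# flattened-error tests exercise two behaviors at once -- keyword arguments
# are folded into a request message, and passing both a request object and
# flattened fields raises ValueError. A toy version of that guard
# (hypothetical field names):
def build_request(request=None, *, parent=None, backup_schedule_id=None):
    flattened = [v for v in (parent, backup_schedule_id) if v is not None]
    if request is not None and flattened:
        raise ValueError(
            "If the `request` argument is set, then none of "
            "the individual field arguments should be set."
        )
    request = dict(request or {})
    if parent is not None:
        request["parent"] = parent
    if backup_schedule_id is not None:
        request["backup_schedule_id"] = backup_schedule_id
    return request

assert build_request(parent="parent_value") == {"parent": "parent_value"}
try:
    build_request(request={}, parent="parent_value")
except ValueError:
    pass
else:
    raise AssertionError("expected ValueError")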
+ response = await client.create_backup_schedule( + parent="parent_value", + backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), + backup_schedule_id="backup_schedule_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_schedule + mock_val = gsad_backup_schedule.BackupSchedule(name="name_value") + assert arg == mock_val + arg = args[0].backup_schedule_id + mock_val = "backup_schedule_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_backup_schedule_flattened_error_async(): + client = DatabaseAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_backup_schedule( + gsad_backup_schedule.CreateBackupScheduleRequest(), + parent="parent_value", + backup_schedule=gsad_backup_schedule.BackupSchedule(name="name_value"), + backup_schedule_id="backup_schedule_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + backup_schedule.GetBackupScheduleRequest, + dict, + ], +) +def test_get_backup_schedule(request_type, transport: str = "grpc"): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_backup_schedule), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = backup_schedule.BackupSchedule( + name="name_value", + ) + response = client.get_backup_schedule(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + request = backup_schedule.GetBackupScheduleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, backup_schedule.BackupSchedule) + assert response.name == "name_value" + + +def test_get_backup_schedule_non_empty_request_with_auto_populated_field(): + # This test is a coverage failsafe to make sure that UUID4 fields are + # automatically populated, according to AIP-4235, with non-empty requests. + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Populate all string fields in the request which are not UUID4 + # since we want to check that UUID4 are populated automatically + # if they meet the requirements of AIP 4235. + request = backup_schedule.GetBackupScheduleRequest( + name="name_value", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_backup_schedule), "__call__" + ) as call: + call.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. 
+ ) + client.get_backup_schedule(request=request) + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == backup_schedule.GetBackupScheduleRequest( + name="name_value", + ) + + +def test_get_backup_schedule_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._transport.get_backup_schedule in client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.get_backup_schedule + ] = mock_rpc + request = {} + client.get_backup_schedule(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + client.get_backup_schedule(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_backup_schedule_async_use_cached_wrapped_rpc( + transport: str = "grpc_asyncio", +): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method_async.wrap_method") as wrapper_fn: + client = DatabaseAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert ( + client._client._transport.get_backup_schedule + in client._client._transport._wrapped_methods + ) + + # Replace cached wrapped function with mock + mock_rpc = mock.AsyncMock() + mock_rpc.return_value = mock.Mock() + client._client._transport._wrapped_methods[ + client._client._transport.get_backup_schedule + ] = mock_rpc + + request = {} + await client.get_backup_schedule(request) + + # Establish that the underlying gRPC stub method was called. + assert mock_rpc.call_count == 1 + + await client.get_backup_schedule(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +@pytest.mark.asyncio +async def test_get_backup_schedule_async( + transport: str = "grpc_asyncio", + request_type=backup_schedule.GetBackupScheduleRequest, +): + client = DatabaseAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_backup_schedule), "__call__" + ) as call: + # Designate an appropriate return value for the call. 
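# --- Illustrative aside (not from the generated diff): the
# *_use_cached_wrapped_rpc tests above verify that each RPC is wrapped once at
# client construction and then reused from the transport's cache, rather than
# re-wrapped on every call. A minimal sketch of that memoization pattern:
class FakeTransport:
    def __init__(self):
        self._wrapped_methods = {}

    def wrapped(self, func):
        # Wrap only on first lookup; subsequent calls hit the cache.
        if func not in self._wrapped_methods:
            self._wrapped_methods[func] = lambda *a, **kw: func(*a, **kw)
        return self._wrapped_methods[func]

def rpc(request):
    return "ok"

transport = FakeTransport()
assert transport.wrapped(rpc) is transport.wrapped(rpc)
assert transport.wrapped(rpc)({}) == "ok"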
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + backup_schedule.BackupSchedule( + name="name_value", + ) + ) + response = await client.get_backup_schedule(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + request = backup_schedule.GetBackupScheduleRequest() + assert args[0] == request + + # Establish that the response is the type that we expect. + assert isinstance(response, backup_schedule.BackupSchedule) + assert response.name == "name_value" + + +@pytest.mark.asyncio +async def test_get_backup_schedule_async_from_dict(): + await test_get_backup_schedule_async(request_type=dict) + + +def test_get_backup_schedule_field_headers(): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = backup_schedule.GetBackupScheduleRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_backup_schedule), "__call__" + ) as call: + call.return_value = backup_schedule.BackupSchedule() + client.get_backup_schedule(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_backup_schedule_field_headers_async(): + client = DatabaseAdminAsyncClient( + credentials=async_anonymous_credentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = backup_schedule.GetBackupScheduleRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_backup_schedule), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + backup_schedule.BackupSchedule() + ) + await client.get_backup_schedule(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_backup_schedule_flattened(): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_backup_schedule), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = backup_schedule.BackupSchedule() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ client.get_backup_schedule( + name="name_value", ) # Establish that the underlying call was made with the expected @@ -11065,6 +11369,7 @@ def test_list_databases_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_databases(request) @@ -11118,6 +11423,7 @@ def test_list_databases_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_databases(**mock_args) @@ -11317,6 +11623,7 @@ def test_create_database_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_database(request) @@ -11369,6 +11676,7 @@ def test_create_database_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_database(**mock_args) @@ -11500,6 +11808,7 @@ def test_get_database_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_database(request) @@ -11547,6 +11856,7 @@ def test_get_database_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_database(**mock_args) @@ -11676,6 +11986,7 @@ def test_update_database_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_database(request) @@ -11730,6 +12041,7 @@ def test_update_database_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_database(**mock_args) @@ -11872,6 +12184,7 @@ def test_update_database_ddl_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_database_ddl(request) @@ -11926,6 +12239,7 @@ def test_update_database_ddl_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_database_ddl(**mock_args) @@ -12055,6 +12369,7 @@ def test_drop_database_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.drop_database(request) @@ -12100,6 +12415,7 @@ def test_drop_database_rest_flattened(): 
json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.drop_database(**mock_args) @@ -12235,6 +12551,7 @@ def test_get_database_ddl_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_database_ddl(request) @@ -12282,6 +12599,7 @@ def test_get_database_ddl_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_database_ddl(**mock_args) @@ -12412,6 +12730,7 @@ def test_set_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) @@ -12465,6 +12784,7 @@ def test_set_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(**mock_args) @@ -12595,6 +12915,7 @@ def test_get_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) @@ -12640,6 +12961,7 @@ def test_get_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(**mock_args) @@ -12778,6 +13100,7 @@ def test_test_iam_permissions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) @@ -12832,6 +13155,7 @@ def test_test_iam_permissions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(**mock_args) @@ -12980,6 +13304,7 @@ def test_create_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_backup(request) @@ -13045,6 +13370,7 @@ def test_create_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_backup(**mock_args) @@ -13185,6 +13511,7 @@ def test_copy_backup_rest_required_fields(request_type=backup.CopyBackupRequest) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = 
{"header-1": "value-1", "header-2": "value-2"} response = client.copy_backup(request) @@ -13241,6 +13568,7 @@ def test_copy_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.copy_backup(**mock_args) @@ -13373,6 +13701,7 @@ def test_get_backup_rest_required_fields(request_type=backup.GetBackupRequest): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_backup(request) @@ -13418,6 +13747,7 @@ def test_get_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_backup(**mock_args) @@ -13546,6 +13876,7 @@ def test_update_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_backup(request) @@ -13602,6 +13933,7 @@ def test_update_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_backup(**mock_args) @@ -13729,6 +14061,7 @@ def test_delete_backup_rest_required_fields(request_type=backup.DeleteBackupRequ response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_backup(request) @@ -13772,6 +14105,7 @@ def test_delete_backup_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_backup(**mock_args) @@ -13908,6 +14242,7 @@ def test_list_backups_rest_required_fields(request_type=backup.ListBackupsReques response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backups(request) @@ -13962,6 +14297,7 @@ def test_list_backups_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backups(**mock_args) @@ -14161,6 +14497,7 @@ def test_restore_database_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.restore_database(request) @@ -14213,6 +14550,7 @@ def test_restore_database_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.restore_database(**mock_args) @@ -14361,6 +14699,7 @@ def 
test_list_database_operations_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_database_operations(request) @@ -14417,6 +14756,7 @@ def test_list_database_operations_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_database_operations(**mock_args) @@ -14625,6 +14965,7 @@ def test_list_backup_operations_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backup_operations(request) @@ -14679,6 +15020,7 @@ def test_list_backup_operations_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backup_operations(**mock_args) @@ -14886,6 +15228,7 @@ def test_list_database_roles_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_database_roles(request) @@ -14941,6 +15284,7 @@ def test_list_database_roles_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_database_roles(**mock_args) @@ -14970,70 +15314,265 @@ def test_list_database_roles_rest_flattened_error(transport: str = "rest"): ) -def test_list_database_roles_rest_pager(transport: str = "rest"): +def test_list_database_roles_rest_pager(transport: str = "rest"): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ # with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + spanner_database_admin.ListDatabaseRolesResponse( + database_roles=[ + spanner_database_admin.DatabaseRole(), + spanner_database_admin.DatabaseRole(), + spanner_database_admin.DatabaseRole(), + ], + next_page_token="abc", + ), + spanner_database_admin.ListDatabaseRolesResponse( + database_roles=[], + next_page_token="def", + ), + spanner_database_admin.ListDatabaseRolesResponse( + database_roles=[ + spanner_database_admin.DatabaseRole(), + ], + next_page_token="ghi", + ), + spanner_database_admin.ListDatabaseRolesResponse( + database_roles=[ + spanner_database_admin.DatabaseRole(), + spanner_database_admin.DatabaseRole(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple( + spanner_database_admin.ListDatabaseRolesResponse.to_json(x) + for x in response + ) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode("UTF-8") + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = { + "parent": "projects/sample1/instances/sample2/databases/sample3" + } + + pager = client.list_database_roles(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, spanner_database_admin.DatabaseRole) for i in results) + + pages = list(client.list_database_roles(request=sample_request).pages) + for page_, token in zip(pages, ["abc", "def", "ghi", ""]): + assert page_.raw_page.next_page_token == token + + +def test_add_split_points_rest_use_cached_wrapped_rpc(): + # Clients should use _prep_wrapped_messages to create cached wrapped rpcs, + # instead of constructing them on each call + with mock.patch("google.api_core.gapic_v1.method.wrap_method") as wrapper_fn: + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Should wrap all calls on client creation + assert wrapper_fn.call_count > 0 + wrapper_fn.reset_mock() + + # Ensure method has been cached + assert client._transport.add_split_points in client._transport._wrapped_methods + + # Replace cached wrapped function with mock + mock_rpc = mock.Mock() + mock_rpc.return_value.name = ( + "foo" # operation_request.operation in compute client(s) expect a string. + ) + client._transport._wrapped_methods[ + client._transport.add_split_points + ] = mock_rpc + + request = {} + client.add_split_points(request) + + # Establish that the underlying gRPC stub method was called. 
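# --- Illustrative aside (not from the generated diff): the REST pager test
# above feeds four fake pages (tokens "abc", "def", "ghi", then "") and
# expects six items back across them. The pagination loop it exercises is
# essentially:
def iterate_pages(fetch, token=""):
    # fetch(token) -> (items, next_page_token); an empty token ends iteration.
    while True:
        items, token = fetch(token)
        yield from items
        if not token:
            break

pages = {
    "": ([1, 2, 3], "abc"),
    "abc": ([], "def"),
    "def": ([4], "ghi"),
    "ghi": ([5, 6], ""),
}
assert list(iterate_pages(lambda t: pages[t])) == [1, 2, 3, 4, 5, 6]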
+ assert mock_rpc.call_count == 1 + + client.add_split_points(request) + + # Establish that a new wrapper was not created for this call + assert wrapper_fn.call_count == 0 + assert mock_rpc.call_count == 2 + + +def test_add_split_points_rest_required_fields( + request_type=spanner_database_admin.AddSplitPointsRequest, +): + transport_class = transports.DatabaseAdminRestTransport + + request_init = {} + request_init["database"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson(pb_request, use_integers_for_enums=False) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).add_split_points._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["database"] = "database_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).add_split_points._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "database" in jsonified_request + assert jsonified_request["database"] == "database_value" + + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = spanner_database_admin.AddSplitPointsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = spanner_database_admin.AddSplitPointsResponse.pb( + return_value + ) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + response = client.add_split_points(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_add_split_points_rest_unset_required_fields(): + transport = transports.DatabaseAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.add_split_points._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "database", + "splitPoints", + ) + ) + ) + + +def test_add_split_points_rest_flattened(): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
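# --- Illustrative aside (not from the generated diff): the required-fields
# tests above round-trip the request through JSON and confirm which required
# fields are still unset before the HTTP call is made. A toy version of that
# check for AddSplitPoints (field names as they appear in the JSON payload):
import json

def unset_required(payload, required=("database", "splitPoints")):
    return [field for field in required if field not in payload]

jsonified = json.loads('{"database": "database_value"}')
assert unset_required(jsonified) == ["splitPoints"]
jsonified["splitPoints"] = [{"table": "table_value"}]
assert unset_required(jsonified) == []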
+ with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = spanner_database_admin.AddSplitPointsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = { + "database": "projects/sample1/instances/sample2/databases/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + database="database_value", + split_points=[spanner_database_admin.SplitPoints(table="table_value")], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = spanner_database_admin.AddSplitPointsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + + client.add_split_points(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints" + % client.transport._host, + args[1], + ) + + +def test_add_split_points_rest_flattened_error(transport: str = "rest"): client = DatabaseAdminClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, "request") as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. - # with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - spanner_database_admin.ListDatabaseRolesResponse( - database_roles=[ - spanner_database_admin.DatabaseRole(), - spanner_database_admin.DatabaseRole(), - spanner_database_admin.DatabaseRole(), - ], - next_page_token="abc", - ), - spanner_database_admin.ListDatabaseRolesResponse( - database_roles=[], - next_page_token="def", - ), - spanner_database_admin.ListDatabaseRolesResponse( - database_roles=[ - spanner_database_admin.DatabaseRole(), - ], - next_page_token="ghi", - ), - spanner_database_admin.ListDatabaseRolesResponse( - database_roles=[ - spanner_database_admin.DatabaseRole(), - spanner_database_admin.DatabaseRole(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple( - spanner_database_admin.ListDatabaseRolesResponse.to_json(x) - for x in response + # Attempting to call a method with both a request object and flattened + # fields is an error. 
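# --- Illustrative aside (not from the generated diff): the flattened REST
# test validates the final request URI against the method's path template,
# "/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints". A
# simplified regex stand-in for that validation (the real check is
# google.api_core.path_template.validate, as called in the test above):
import re

def matches_template(template, uri):
    # Any "{var=pattern}" segment may match one or more non-colon characters.
    literals = re.split(r"\{[^}]*\}", template)
    pattern = "[^:]+".join(re.escape(part) for part in literals) + "$"
    return re.match(pattern, uri) is not None

template = "https://example.com/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints"
uri = "https://example.com/v1/projects/sample1/instances/sample2/databases/sample3:addSplitPoints"
assert matches_template(template, uri)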
+ with pytest.raises(ValueError): + client.add_split_points( + spanner_database_admin.AddSplitPointsRequest(), + database="database_value", + split_points=[spanner_database_admin.SplitPoints(table="table_value")], ) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode("UTF-8") - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = { - "parent": "projects/sample1/instances/sample2/databases/sample3" - } - - pager = client.list_database_roles(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, spanner_database_admin.DatabaseRole) for i in results) - - pages = list(client.list_database_roles(request=sample_request).pages) - for page_, token in zip(pages, ["abc", "def", "ghi", ""]): - assert page_.raw_page.next_page_token == token def test_create_backup_schedule_rest_use_cached_wrapped_rpc(): @@ -15153,6 +15692,7 @@ def test_create_backup_schedule_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_backup_schedule(request) @@ -15217,6 +15757,7 @@ def test_create_backup_schedule_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_backup_schedule(**mock_args) @@ -15354,6 +15895,7 @@ def test_get_backup_schedule_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_backup_schedule(request) @@ -15401,6 +15943,7 @@ def test_get_backup_schedule_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_backup_schedule(**mock_args) @@ -15535,6 +16078,7 @@ def test_update_backup_schedule_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_backup_schedule(request) @@ -15593,6 +16137,7 @@ def test_update_backup_schedule_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_backup_schedule(**mock_args) @@ -15727,6 +16272,7 @@ def test_delete_backup_schedule_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_backup_schedule(request) @@ -15772,6 +16318,7 @@ def test_delete_backup_schedule_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_backup_schedule(**mock_args) @@ -15915,6 +16462,7 @@ def 
test_list_backup_schedules_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backup_schedules(request) @@ -15970,6 +16518,7 @@ def test_list_backup_schedules_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backup_schedules(**mock_args) @@ -16600,6 +17149,27 @@ def test_list_database_roles_empty_call_grpc(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_add_split_points_empty_call_grpc(): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: + call.return_value = spanner_database_admin.AddSplitPointsResponse() + client.add_split_points(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = spanner_database_admin.AddSplitPointsRequest() + + assert args[0] == request_msg + + # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_backup_schedule_empty_call_grpc(): @@ -17288,6 +17858,31 @@ async def test_list_database_roles_empty_call_grpc_asyncio(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +@pytest.mark.asyncio +async def test_add_split_points_empty_call_grpc_asyncio(): + client = DatabaseAdminAsyncClient( + credentials=async_anonymous_credentials(), + transport="grpc_asyncio", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + spanner_database_admin.AddSplitPointsResponse() + ) + await client.add_split_points(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = spanner_database_admin.AddSplitPointsRequest() + + assert args[0] == request_msg + + # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. @pytest.mark.asyncio @@ -17457,6 +18052,7 @@ def test_list_databases_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_databases(request) @@ -17492,6 +18088,7 @@ def test_list_databases_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_databases(request) # Establish that the response is the type that we expect. 
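# --- Illustrative aside (not from the generated diff): the recurring
# `req.return_value.headers = {"header-1": ..., "header-2": ...}` additions
# give every mocked HTTP response a real headers mapping. Presumably the
# updated REST transport now reads response headers (for example, to surface
# them through the new *_with_metadata interceptor hooks), and a bare Mock
# attribute would not behave like a dict. Sketch of the shape the mocks need:
from unittest import mock

response = mock.Mock()
response.status_code = 200
response.headers = {"header-1": "value-1", "header-2": "value-2"}
# Transport-style consumption of the headers now works:
assert ("header-1", "value-1") in tuple(response.headers.items())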
@@ -17516,10 +18113,13 @@ def test_list_databases_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_list_databases" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_list_databases_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_list_databases" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.ListDatabasesRequest.pb( spanner_database_admin.ListDatabasesRequest() ) @@ -17532,6 +18132,7 @@ def test_list_databases_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_database_admin.ListDatabasesResponse.to_json( spanner_database_admin.ListDatabasesResponse() ) @@ -17544,6 +18145,10 @@ def test_list_databases_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_database_admin.ListDatabasesResponse() + post_with_metadata.return_value = ( + spanner_database_admin.ListDatabasesResponse(), + metadata, + ) client.list_databases( request, @@ -17555,6 +18160,7 @@ def test_list_databases_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_database_rest_bad_request( @@ -17578,6 +18184,7 @@ def test_create_database_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_database(request) @@ -17608,6 +18215,7 @@ def test_create_database_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_database(request) # Establish that the response is the type that we expect. 
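# --- Illustrative aside (not from the generated diff): the interceptor tests
# above wire a pre hook, a post hook, and the newly added
# post_*_with_metadata hook, which receives (response, metadata) and may
# rewrite both before the client returns. A minimal sketch of that call
# chain, under the assumption that all three hooks fire in order:
class FakeInterceptor:
    def pre(self, request, metadata):
        return request, metadata

    def post(self, response):
        return response

    def post_with_metadata(self, response, metadata):
        return response, metadata

def invoke(interceptor, request, send):
    metadata = [("key", "value")]
    request, metadata = interceptor.pre(request, metadata)
    response = send(request)
    response = interceptor.post(response)
    response, metadata = interceptor.post_with_metadata(response, metadata)
    return response

assert invoke(FakeInterceptor(), {"parent": "p"}, lambda r: {"databases": []}) == {"databases": []}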
@@ -17633,10 +18241,13 @@ def test_create_database_rest_interceptors(null_interceptor): ), mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_create_database" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_create_database_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_create_database" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.CreateDatabaseRequest.pb( spanner_database_admin.CreateDatabaseRequest() ) @@ -17649,6 +18260,7 @@ def test_create_database_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -17659,6 +18271,7 @@ def test_create_database_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_database( request, @@ -17670,6 +18283,7 @@ def test_create_database_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_database_rest_bad_request( @@ -17693,6 +18307,7 @@ def test_get_database_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_database(request) @@ -17734,6 +18349,7 @@ def test_get_database_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_database(request) # Establish that the response is the type that we expect. 
@@ -17764,10 +18380,13 @@ def test_get_database_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_get_database" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_get_database_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_get_database" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.GetDatabaseRequest.pb( spanner_database_admin.GetDatabaseRequest() ) @@ -17780,6 +18399,7 @@ def test_get_database_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_database_admin.Database.to_json( spanner_database_admin.Database() ) @@ -17792,6 +18412,7 @@ def test_get_database_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_database_admin.Database() + post_with_metadata.return_value = spanner_database_admin.Database(), metadata client.get_database( request, @@ -17803,6 +18424,7 @@ def test_get_database_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_database_rest_bad_request( @@ -17828,6 +18450,7 @@ def test_update_database_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_database(request) @@ -17967,6 +18590,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_database(request) # Establish that the response is the type that we expect. 
@@ -17992,10 +18616,13 @@ def test_update_database_rest_interceptors(null_interceptor): ), mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_update_database" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_update_database_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_update_database" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.UpdateDatabaseRequest.pb( spanner_database_admin.UpdateDatabaseRequest() ) @@ -18008,6 +18635,7 @@ def test_update_database_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -18018,6 +18646,7 @@ def test_update_database_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.update_database( request, @@ -18029,6 +18658,7 @@ def test_update_database_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_database_ddl_rest_bad_request( @@ -18052,6 +18682,7 @@ def test_update_database_ddl_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_database_ddl(request) @@ -18082,6 +18713,7 @@ def test_update_database_ddl_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_database_ddl(request) # Establish that the response is the type that we expect. 
@@ -18107,10 +18739,14 @@ def test_update_database_ddl_rest_interceptors(null_interceptor): ), mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_update_database_ddl" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_update_database_ddl_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_update_database_ddl" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.UpdateDatabaseDdlRequest.pb( spanner_database_admin.UpdateDatabaseDdlRequest() ) @@ -18123,6 +18759,7 @@ def test_update_database_ddl_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -18133,6 +18770,7 @@ def test_update_database_ddl_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.update_database_ddl( request, @@ -18144,6 +18782,7 @@ def test_update_database_ddl_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_drop_database_rest_bad_request( @@ -18167,6 +18806,7 @@ def test_drop_database_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.drop_database(request) @@ -18197,6 +18837,7 @@ def test_drop_database_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.drop_database(request) # Establish that the response is the type that we expect. @@ -18233,6 +18874,7 @@ def test_drop_database_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = spanner_database_admin.DropDatabaseRequest() metadata = [ @@ -18273,6 +18915,7 @@ def test_get_database_ddl_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_database_ddl(request) @@ -18309,6 +18952,7 @@ def test_get_database_ddl_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_database_ddl(request) # Establish that the response is the type that we expect. 
@@ -18334,10 +18978,13 @@ def test_get_database_ddl_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_get_database_ddl" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_get_database_ddl_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_get_database_ddl" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.GetDatabaseDdlRequest.pb( spanner_database_admin.GetDatabaseDdlRequest() ) @@ -18350,6 +18997,7 @@ def test_get_database_ddl_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_database_admin.GetDatabaseDdlResponse.to_json( spanner_database_admin.GetDatabaseDdlResponse() ) @@ -18362,6 +19010,10 @@ def test_get_database_ddl_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_database_admin.GetDatabaseDdlResponse() + post_with_metadata.return_value = ( + spanner_database_admin.GetDatabaseDdlResponse(), + metadata, + ) client.get_database_ddl( request, @@ -18373,6 +19025,7 @@ def test_get_database_ddl_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_set_iam_policy_rest_bad_request( @@ -18396,6 +19049,7 @@ def test_set_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(request) @@ -18429,6 +19083,7 @@ def test_set_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) # Establish that the response is the type that we expect. 
@@ -18454,10 +19109,13 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_set_iam_policy" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_set_iam_policy_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_set_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.SetIamPolicyRequest() transcode.return_value = { "method": "post", @@ -18468,6 +19126,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -18478,6 +19137,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata client.set_iam_policy( request, @@ -18489,6 +19149,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_iam_policy_rest_bad_request( @@ -18512,6 +19173,7 @@ def test_get_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(request) @@ -18545,6 +19207,7 @@ def test_get_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) # Establish that the response is the type that we expect. 
@@ -18570,10 +19233,13 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_get_iam_policy" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_get_iam_policy_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_get_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.GetIamPolicyRequest() transcode.return_value = { "method": "post", @@ -18584,6 +19250,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -18594,6 +19261,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata client.get_iam_policy( request, @@ -18605,6 +19273,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_test_iam_permissions_rest_bad_request( @@ -18628,6 +19297,7 @@ def test_test_iam_permissions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(request) @@ -18660,6 +19330,7 @@ def test_test_iam_permissions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. 
@@ -18684,10 +19355,14 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_test_iam_permissions" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_test_iam_permissions_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_test_iam_permissions" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.TestIamPermissionsRequest() transcode.return_value = { "method": "post", @@ -18698,6 +19373,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson( iam_policy_pb2.TestIamPermissionsResponse() ) @@ -18710,6 +19386,10 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + post_with_metadata.return_value = ( + iam_policy_pb2.TestIamPermissionsResponse(), + metadata, + ) client.test_iam_permissions( request, @@ -18721,6 +19401,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_backup_rest_bad_request(request_type=gsad_backup.CreateBackupRequest): @@ -18742,6 +19423,7 @@ def test_create_backup_rest_bad_request(request_type=gsad_backup.CreateBackupReq response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_backup(request) @@ -18797,6 +19479,7 @@ def test_create_backup_rest_call_success(request_type): "backup_schedules": ["backup_schedules_value1", "backup_schedules_value2"], "incremental_backup_chain_id": "incremental_backup_chain_id_value", "oldest_version_time": {}, + "instance_partitions": [{"instance_partition": "instance_partition_value"}], } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -18878,6 +19561,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_backup(request) # Establish that the response is the type that we expect. 
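# The backup request bodies in the surrounding hunks gain an
# `instance_partitions` list whose entries carry an `instance_partition`
# resource name. A hedged construction sketch; the `BackupInstancePartition`
# message name is inferred from the dict shape used in the tests
# ({"instance_partition": "instance_partition_value"}) and should be treated
# as an assumption:

from google.cloud.spanner_admin_database_v1 import types

b = types.Backup(
    instance_partitions=[
        types.BackupInstancePartition(
            instance_partition=(
                "projects/sample1/instances/sample2/instancePartitions/part1"
            )
        )
    ]
)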
@@ -18903,10 +19587,13 @@ def test_create_backup_rest_interceptors(null_interceptor): ), mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_create_backup" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_create_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_create_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = gsad_backup.CreateBackupRequest.pb( gsad_backup.CreateBackupRequest() ) @@ -18919,6 +19606,7 @@ def test_create_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -18929,6 +19617,7 @@ def test_create_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_backup( request, @@ -18940,6 +19629,7 @@ def test_create_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_copy_backup_rest_bad_request(request_type=backup.CopyBackupRequest): @@ -18961,6 +19651,7 @@ def test_copy_backup_rest_bad_request(request_type=backup.CopyBackupRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.copy_backup(request) @@ -18991,6 +19682,7 @@ def test_copy_backup_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.copy_backup(request) # Establish that the response is the type that we expect. 
@@ -19016,10 +19708,13 @@ def test_copy_backup_rest_interceptors(null_interceptor): ), mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_copy_backup" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_copy_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_copy_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = backup.CopyBackupRequest.pb(backup.CopyBackupRequest()) transcode.return_value = { "method": "post", @@ -19030,6 +19725,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19040,6 +19736,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.copy_backup( request, @@ -19051,6 +19748,7 @@ def test_copy_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_backup_rest_bad_request(request_type=backup.GetBackupRequest): @@ -19072,6 +19770,7 @@ def test_get_backup_rest_bad_request(request_type=backup.GetBackupRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_backup(request) @@ -19117,6 +19816,7 @@ def test_get_backup_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_backup(request) # Establish that the response is the type that we expect. 
@@ -19151,10 +19851,13 @@ def test_get_backup_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_get_backup" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_get_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_get_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = backup.GetBackupRequest.pb(backup.GetBackupRequest()) transcode.return_value = { "method": "post", @@ -19165,6 +19868,7 @@ def test_get_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = backup.Backup.to_json(backup.Backup()) req.return_value.content = return_value @@ -19175,6 +19879,7 @@ def test_get_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = backup.Backup() + post_with_metadata.return_value = backup.Backup(), metadata client.get_backup( request, @@ -19186,6 +19891,7 @@ def test_get_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_backup_rest_bad_request(request_type=gsad_backup.UpdateBackupRequest): @@ -19209,6 +19915,7 @@ def test_update_backup_rest_bad_request(request_type=gsad_backup.UpdateBackupReq response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_backup(request) @@ -19266,6 +19973,7 @@ def test_update_backup_rest_call_success(request_type): "backup_schedules": ["backup_schedules_value1", "backup_schedules_value2"], "incremental_backup_chain_id": "incremental_backup_chain_id_value", "oldest_version_time": {}, + "instance_partitions": [{"instance_partition": "instance_partition_value"}], } # The version of a generated dependency at test runtime may differ from the version used during generation. # Delete any fields which are not present in the current runtime dependency @@ -19362,6 +20070,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_backup(request) # Establish that the response is the type that we expect. 
@@ -19396,10 +20105,13 @@ def test_update_backup_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_update_backup" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_update_backup_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_update_backup" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = gsad_backup.UpdateBackupRequest.pb( gsad_backup.UpdateBackupRequest() ) @@ -19412,6 +20124,7 @@ def test_update_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gsad_backup.Backup.to_json(gsad_backup.Backup()) req.return_value.content = return_value @@ -19422,6 +20135,7 @@ def test_update_backup_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = gsad_backup.Backup() + post_with_metadata.return_value = gsad_backup.Backup(), metadata client.update_backup( request, @@ -19433,6 +20147,7 @@ def test_update_backup_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_backup_rest_bad_request(request_type=backup.DeleteBackupRequest): @@ -19454,6 +20169,7 @@ def test_delete_backup_rest_bad_request(request_type=backup.DeleteBackupRequest) response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_backup(request) @@ -19484,6 +20200,7 @@ def test_delete_backup_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_backup(request) # Establish that the response is the type that we expect. @@ -19518,6 +20235,7 @@ def test_delete_backup_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = backup.DeleteBackupRequest() metadata = [ @@ -19556,6 +20274,7 @@ def test_list_backups_rest_bad_request(request_type=backup.ListBackupsRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backups(request) @@ -19591,6 +20310,7 @@ def test_list_backups_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backups(request) # Establish that the response is the type that we expect. 
@@ -19615,10 +20335,13 @@ def test_list_backups_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_list_backups" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_list_backups_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_list_backups" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = backup.ListBackupsRequest.pb(backup.ListBackupsRequest()) transcode.return_value = { "method": "post", @@ -19629,6 +20352,7 @@ def test_list_backups_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = backup.ListBackupsResponse.to_json(backup.ListBackupsResponse()) req.return_value.content = return_value @@ -19639,6 +20363,7 @@ def test_list_backups_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = backup.ListBackupsResponse() + post_with_metadata.return_value = backup.ListBackupsResponse(), metadata client.list_backups( request, @@ -19650,6 +20375,7 @@ def test_list_backups_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_restore_database_rest_bad_request( @@ -19673,6 +20399,7 @@ def test_restore_database_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.restore_database(request) @@ -19703,6 +20430,7 @@ def test_restore_database_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.restore_database(request) # Establish that the response is the type that we expect. 
@@ -19728,10 +20456,13 @@ def test_restore_database_rest_interceptors(null_interceptor): ), mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_restore_database" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_restore_database_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_restore_database" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.RestoreDatabaseRequest.pb( spanner_database_admin.RestoreDatabaseRequest() ) @@ -19744,6 +20475,7 @@ def test_restore_database_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19754,6 +20486,7 @@ def test_restore_database_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.restore_database( request, @@ -19765,6 +20498,7 @@ def test_restore_database_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_database_operations_rest_bad_request( @@ -19788,6 +20522,7 @@ def test_list_database_operations_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_database_operations(request) @@ -19825,6 +20560,7 @@ def test_list_database_operations_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_database_operations(request) # Establish that the response is the type that we expect. 
@@ -19849,10 +20585,14 @@ def test_list_database_operations_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_list_database_operations" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_list_database_operations_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_list_database_operations" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.ListDatabaseOperationsRequest.pb( spanner_database_admin.ListDatabaseOperationsRequest() ) @@ -19865,6 +20605,7 @@ def test_list_database_operations_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_database_admin.ListDatabaseOperationsResponse.to_json( spanner_database_admin.ListDatabaseOperationsResponse() ) @@ -19877,6 +20618,10 @@ def test_list_database_operations_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_database_admin.ListDatabaseOperationsResponse() + post_with_metadata.return_value = ( + spanner_database_admin.ListDatabaseOperationsResponse(), + metadata, + ) client.list_database_operations( request, @@ -19888,6 +20633,7 @@ def test_list_database_operations_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_backup_operations_rest_bad_request( @@ -19911,6 +20657,7 @@ def test_list_backup_operations_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backup_operations(request) @@ -19946,6 +20693,7 @@ def test_list_backup_operations_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backup_operations(request) # Establish that the response is the type that we expect. 
@@ -19970,10 +20718,14 @@ def test_list_backup_operations_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_list_backup_operations" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_list_backup_operations_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_list_backup_operations" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = backup.ListBackupOperationsRequest.pb( backup.ListBackupOperationsRequest() ) @@ -19986,6 +20738,7 @@ def test_list_backup_operations_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = backup.ListBackupOperationsResponse.to_json( backup.ListBackupOperationsResponse() ) @@ -19998,6 +20751,10 @@ def test_list_backup_operations_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = backup.ListBackupOperationsResponse() + post_with_metadata.return_value = ( + backup.ListBackupOperationsResponse(), + metadata, + ) client.list_backup_operations( request, @@ -20009,6 +20766,7 @@ def test_list_backup_operations_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_database_roles_rest_bad_request( @@ -20032,6 +20790,7 @@ def test_list_database_roles_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_database_roles(request) @@ -20067,6 +20826,7 @@ def test_list_database_roles_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_database_roles(request) # Establish that the response is the type that we expect. 
@@ -20091,10 +20851,14 @@ def test_list_database_roles_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_list_database_roles" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_list_database_roles_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_list_database_roles" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_database_admin.ListDatabaseRolesRequest.pb( spanner_database_admin.ListDatabaseRolesRequest() ) @@ -20107,6 +20871,7 @@ def test_list_database_roles_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_database_admin.ListDatabaseRolesResponse.to_json( spanner_database_admin.ListDatabaseRolesResponse() ) @@ -20119,6 +20884,10 @@ def test_list_database_roles_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_database_admin.ListDatabaseRolesResponse() + post_with_metadata.return_value = ( + spanner_database_admin.ListDatabaseRolesResponse(), + metadata, + ) client.list_database_roles( request, @@ -20130,6 +20899,136 @@ def test_list_database_roles_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() + + +def test_add_split_points_rest_bad_request( + request_type=spanner_database_admin.AddSplitPointsRequest, +): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + # send a request that will satisfy transcoding + request_init = {"database": "projects/sample1/instances/sample2/databases/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = mock.Mock() + json_return_value = "" + response_value.json = mock.Mock(return_value={}) + response_value.status_code = 400 + response_value.request = mock.Mock() + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + client.add_split_points(request) + + +@pytest.mark.parametrize( + "request_type", + [ + spanner_database_admin.AddSplitPointsRequest, + dict, + ], +) +def test_add_split_points_rest_call_success(request_type): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + # send a request that will satisfy transcoding + request_init = {"database": "projects/sample1/instances/sample2/databases/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. 
+ return_value = spanner_database_admin.AddSplitPointsResponse() + + # Wrap the value into a proper Response obj + response_value = mock.Mock() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = spanner_database_admin.AddSplitPointsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value.content = json_return_value.encode("UTF-8") + req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + response = client.add_split_points(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, spanner_database_admin.AddSplitPointsResponse) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_add_split_points_rest_interceptors(null_interceptor): + transport = transports.DatabaseAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.DatabaseAdminRestInterceptor(), + ) + client = DatabaseAdminClient(transport=transport) + + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_add_split_points" + ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "post_add_split_points_with_metadata" + ) as post_with_metadata, mock.patch.object( + transports.DatabaseAdminRestInterceptor, "pre_add_split_points" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + post_with_metadata.assert_not_called() + pb_message = spanner_database_admin.AddSplitPointsRequest.pb( + spanner_database_admin.AddSplitPointsRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = mock.Mock() + req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} + return_value = spanner_database_admin.AddSplitPointsResponse.to_json( + spanner_database_admin.AddSplitPointsResponse() + ) + req.return_value.content = return_value + + request = spanner_database_admin.AddSplitPointsRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = spanner_database_admin.AddSplitPointsResponse() + post_with_metadata.return_value = ( + spanner_database_admin.AddSplitPointsResponse(), + metadata, + ) + + client.add_split_points( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_backup_schedule_rest_bad_request( @@ -20153,6 +21052,7 @@ def test_create_backup_schedule_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_backup_schedule(request) @@ -20276,6 +21176,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_backup_schedule(request) # Establish that the response is the type that we expect. 
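# Hedged usage sketch for the new AddSplitPoints RPC covered by the tests
# above. The client method and the AddSplitPointsRequest/Response types appear
# in the diff itself; the SplitPoints message shape (table / keys / key_parts)
# is an assumption about the proto and may differ:

from google.cloud import spanner_admin_database_v1
from google.protobuf import struct_pb2

client = spanner_admin_database_v1.DatabaseAdminClient()
response = client.add_split_points(
    request=spanner_admin_database_v1.AddSplitPointsRequest(
        database="projects/sample1/instances/sample2/databases/sample3",
        split_points=[
            spanner_admin_database_v1.SplitPoints(
                table="Singers",
                keys=[
                    spanner_admin_database_v1.SplitPoints.Key(
                        # Key parts are expressed as a protobuf ListValue.
                        key_parts=struct_pb2.ListValue(
                            values=[struct_pb2.Value(string_value="42")]
                        )
                    )
                ],
            )
        ],
    )
)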
@@ -20300,10 +21201,14 @@ def test_create_backup_schedule_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_create_backup_schedule" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_create_backup_schedule_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_create_backup_schedule" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = gsad_backup_schedule.CreateBackupScheduleRequest.pb( gsad_backup_schedule.CreateBackupScheduleRequest() ) @@ -20316,6 +21221,7 @@ def test_create_backup_schedule_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gsad_backup_schedule.BackupSchedule.to_json( gsad_backup_schedule.BackupSchedule() ) @@ -20328,6 +21234,10 @@ def test_create_backup_schedule_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = gsad_backup_schedule.BackupSchedule() + post_with_metadata.return_value = ( + gsad_backup_schedule.BackupSchedule(), + metadata, + ) client.create_backup_schedule( request, @@ -20339,6 +21249,7 @@ def test_create_backup_schedule_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_backup_schedule_rest_bad_request( @@ -20364,6 +21275,7 @@ def test_get_backup_schedule_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_backup_schedule(request) @@ -20401,6 +21313,7 @@ def test_get_backup_schedule_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_backup_schedule(request) # Establish that the response is the type that we expect. 
@@ -20425,10 +21338,14 @@ def test_get_backup_schedule_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_get_backup_schedule" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_get_backup_schedule_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_get_backup_schedule" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = backup_schedule.GetBackupScheduleRequest.pb( backup_schedule.GetBackupScheduleRequest() ) @@ -20441,6 +21358,7 @@ def test_get_backup_schedule_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = backup_schedule.BackupSchedule.to_json( backup_schedule.BackupSchedule() ) @@ -20453,6 +21371,7 @@ def test_get_backup_schedule_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = backup_schedule.BackupSchedule() + post_with_metadata.return_value = backup_schedule.BackupSchedule(), metadata client.get_backup_schedule( request, @@ -20464,6 +21383,7 @@ def test_get_backup_schedule_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_backup_schedule_rest_bad_request( @@ -20491,6 +21411,7 @@ def test_update_backup_schedule_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_backup_schedule(request) @@ -20618,6 +21539,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_backup_schedule(request) # Establish that the response is the type that we expect. 
@@ -20642,10 +21564,14 @@ def test_update_backup_schedule_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_update_backup_schedule" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_update_backup_schedule_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_update_backup_schedule" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = gsad_backup_schedule.UpdateBackupScheduleRequest.pb( gsad_backup_schedule.UpdateBackupScheduleRequest() ) @@ -20658,6 +21584,7 @@ def test_update_backup_schedule_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gsad_backup_schedule.BackupSchedule.to_json( gsad_backup_schedule.BackupSchedule() ) @@ -20670,6 +21597,10 @@ def test_update_backup_schedule_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = gsad_backup_schedule.BackupSchedule() + post_with_metadata.return_value = ( + gsad_backup_schedule.BackupSchedule(), + metadata, + ) client.update_backup_schedule( request, @@ -20681,6 +21612,7 @@ def test_update_backup_schedule_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_backup_schedule_rest_bad_request( @@ -20706,6 +21638,7 @@ def test_delete_backup_schedule_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_backup_schedule(request) @@ -20738,6 +21671,7 @@ def test_delete_backup_schedule_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_backup_schedule(request) # Establish that the response is the type that we expect. @@ -20774,6 +21708,7 @@ def test_delete_backup_schedule_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = backup_schedule.DeleteBackupScheduleRequest() metadata = [ @@ -20814,6 +21749,7 @@ def test_list_backup_schedules_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backup_schedules(request) @@ -20849,6 +21785,7 @@ def test_list_backup_schedules_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backup_schedules(request) # Establish that the response is the type that we expect. 
@@ -20873,10 +21810,14 @@ def test_list_backup_schedules_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.DatabaseAdminRestInterceptor, "post_list_backup_schedules" ) as post, mock.patch.object( + transports.DatabaseAdminRestInterceptor, + "post_list_backup_schedules_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.DatabaseAdminRestInterceptor, "pre_list_backup_schedules" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = backup_schedule.ListBackupSchedulesRequest.pb( backup_schedule.ListBackupSchedulesRequest() ) @@ -20889,6 +21830,7 @@ def test_list_backup_schedules_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = backup_schedule.ListBackupSchedulesResponse.to_json( backup_schedule.ListBackupSchedulesResponse() ) @@ -20901,6 +21843,10 @@ def test_list_backup_schedules_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = backup_schedule.ListBackupSchedulesResponse() + post_with_metadata.return_value = ( + backup_schedule.ListBackupSchedulesResponse(), + metadata, + ) client.list_backup_schedules( request, @@ -20912,6 +21858,7 @@ def test_list_backup_schedules_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_cancel_operation_rest_bad_request( @@ -20940,6 +21887,7 @@ def test_cancel_operation_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.cancel_operation(request) @@ -20972,6 +21920,7 @@ def test_cancel_operation_rest(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.cancel_operation(request) @@ -21005,6 +21954,7 @@ def test_delete_operation_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_operation(request) @@ -21037,6 +21987,7 @@ def test_delete_operation_rest(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_operation(request) @@ -21070,6 +22021,7 @@ def test_get_operation_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_operation(request) @@ -21102,6 +22054,7 @@ def test_get_operation_rest(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_operation(request) @@ -21133,6 +22086,7 @@ def test_list_operations_rest_bad_request( response_value.status_code = 400 response_value.request = Request() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_operations(request) @@ -21165,6 +22119,7 @@ def 
test_list_operations_rest(request_type): response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_operations(request) @@ -21589,6 +22544,26 @@ def test_list_database_roles_empty_call_rest(): assert args[0] == request_msg +# This test is a coverage failsafe to make sure that totally empty calls, +# i.e. request == None and no flattened fields passed, work. +def test_add_split_points_empty_call_rest(): + client = DatabaseAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the actual call, and fake the request. + with mock.patch.object(type(client.transport.add_split_points), "__call__") as call: + client.add_split_points(request=None) + + # Establish that the underlying stub method was called. + call.assert_called() + _, args, _ = call.mock_calls[0] + request_msg = spanner_database_admin.AddSplitPointsRequest() + + assert args[0] == request_msg + + # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. def test_create_backup_schedule_empty_call_rest(): @@ -21769,6 +22744,7 @@ def test_database_admin_base_transport(): "list_database_operations", "list_backup_operations", "list_database_roles", + "add_split_points", "create_backup_schedule", "get_backup_schedule", "update_backup_schedule", @@ -22113,6 +23089,9 @@ def test_database_admin_client_transport_session_collision(transport_name): session1 = client1.transport.list_database_roles._session session2 = client2.transport.list_database_roles._session assert session1 != session2 + session1 = client1.transport.add_split_points._session + session2 = client2.transport.add_split_points._session + assert session1 != session2 session1 = client1.transport.create_backup_schedule._session session2 = client2.transport.create_backup_schedule._session assert session1 != session2 @@ -22488,8 +23467,36 @@ def test_parse_instance_path(): assert expected == actual +def test_instance_partition_path(): + project = "whelk" + instance = "octopus" + instance_partition = "oyster" + expected = "projects/{project}/instances/{instance}/instancePartitions/{instance_partition}".format( + project=project, + instance=instance, + instance_partition=instance_partition, + ) + actual = DatabaseAdminClient.instance_partition_path( + project, instance, instance_partition + ) + assert expected == actual + + +def test_parse_instance_partition_path(): + expected = { + "project": "nudibranch", + "instance": "cuttlefish", + "instance_partition": "mussel", + } + path = DatabaseAdminClient.instance_partition_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DatabaseAdminClient.parse_instance_partition_path(path) + assert expected == actual + + def test_common_billing_account_path(): - billing_account = "whelk" + billing_account = "winkle" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -22499,7 +23506,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "octopus", + "billing_account": "nautilus", } path = DatabaseAdminClient.common_billing_account_path(**expected) @@ -22509,7 +23516,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "oyster" + folder = "scallop" expected = "folders/{folder}".format( folder=folder, ) @@ -22519,7 +23526,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nudibranch", + "folder": "abalone", } path = DatabaseAdminClient.common_folder_path(**expected) @@ -22529,7 +23536,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "cuttlefish" + organization = "squid" expected = "organizations/{organization}".format( organization=organization, ) @@ -22539,7 +23546,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "mussel", + "organization": "clam", } path = DatabaseAdminClient.common_organization_path(**expected) @@ -22549,7 +23556,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "winkle" + project = "whelk" expected = "projects/{project}".format( project=project, ) @@ -22559,7 +23566,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "nautilus", + "project": "octopus", } path = DatabaseAdminClient.common_project_path(**expected) @@ -22569,8 +23576,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "scallop" - location = "abalone" + project = "oyster" + location = "nudibranch" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -22581,8 +23588,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "squid", - "location": "clam", + "project": "cuttlefish", + "location": "mussel", } path = DatabaseAdminClient.common_location_path(**expected) diff --git a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py index 55df772e88..c3188125ac 100644 --- a/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py +++ b/tests/unit/gapic/spanner_admin_instance_v1/test_instance_admin.py @@ -79,6 +79,14 @@ import google.auth +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + async def mock_async_gen(data, chunk_size=1): for i in range(0, len(data)): # pragma: NO COVER chunk = data[i : i + chunk_size] @@ -333,83 +341,46 @@ def test__get_universe_domain(): @pytest.mark.parametrize( - "client_class,transport_class,transport_name", + "error_code,cred_info_json,show_cred_info", [ - (InstanceAdminClient, transports.InstanceAdminGrpcTransport, "grpc"), - (InstanceAdminClient, transports.InstanceAdminRestTransport, "rest"), + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, 
False), + (403, None, False), + (404, None, False), + (500, None, False), ], ) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) - ) - assert client._validate_universe_domain() == True +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = InstanceAdminClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = InstanceAdminClient(credentials=cred) + client._transport._credentials = cred - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - transport = transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code - # TODO: This is needed to cater for older versions of google-auth - # Make this test unconditional once the minimum supported version of - # google-auth becomes 2.23.0 or higher. - google_auth_major, google_auth_minor = [ - int(part) for part in google.auth.__version__.split(".")[0:2] - ] - if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): - credentials = ga_credentials.AnonymousCredentials() - credentials._universe_domain = "foo.com" - # Test the case when there is a universe mismatch from the credentials. - client = client_class(transport=transport_class(credentials=credentials)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. 
- api_core_major, api_core_minor = [ - int(part) for part in api_core_version.__version__.split(".")[0:2] - ] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class( - client_options={"universe_domain": "bar.com"}, - transport=transport_class( - credentials=ga_credentials.AnonymousCredentials(), - ), - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test that ValueError is raised if universe_domain is provided via client options and credentials is None - with pytest.raises(ValueError): - client._compare_universes("foo.bar", None) + client._add_cred_info_for_auth_errors(error) + assert error.details == [] @pytest.mark.parametrize( @@ -1739,6 +1710,9 @@ def test_get_instance_config(request_type, transport: str = "grpc"): leader_options=["leader_options_value"], reconciling=True, state=spanner_instance_admin.InstanceConfig.State.CREATING, + free_instance_availability=spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE, + quorum_type=spanner_instance_admin.InstanceConfig.QuorumType.REGION, + storage_limit_per_processing_unit=3540, ) response = client.get_instance_config(request) @@ -1761,6 +1735,14 @@ def test_get_instance_config(request_type, transport: str = "grpc"): assert response.leader_options == ["leader_options_value"] assert response.reconciling is True assert response.state == spanner_instance_admin.InstanceConfig.State.CREATING + assert ( + response.free_instance_availability + == spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE + ) + assert ( + response.quorum_type == spanner_instance_admin.InstanceConfig.QuorumType.REGION + ) + assert response.storage_limit_per_processing_unit == 3540 def test_get_instance_config_non_empty_request_with_auto_populated_field(): @@ -1903,6 +1885,9 @@ async def test_get_instance_config_async( leader_options=["leader_options_value"], reconciling=True, state=spanner_instance_admin.InstanceConfig.State.CREATING, + free_instance_availability=spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE, + quorum_type=spanner_instance_admin.InstanceConfig.QuorumType.REGION, + storage_limit_per_processing_unit=3540, ) ) response = await client.get_instance_config(request) @@ -1926,6 +1911,14 @@ async def test_get_instance_config_async( assert response.leader_options == ["leader_options_value"] assert response.reconciling is True assert response.state == spanner_instance_admin.InstanceConfig.State.CREATING + assert ( + response.free_instance_availability + == spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE + ) + assert ( + response.quorum_type == spanner_instance_admin.InstanceConfig.QuorumType.REGION + ) + assert response.storage_limit_per_processing_unit == 3540 @pytest.mark.asyncio @@ -4806,6 +4799,7 @@ def test_get_instance(request_type, transport: str = "grpc"): node_count=1070, processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, + instance_type=spanner_instance_admin.Instance.InstanceType.PROVISIONED, endpoint_uris=["endpoint_uris_value"], edition=spanner_instance_admin.Instance.Edition.STANDARD, default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.NONE, @@ -4826,6 +4820,10 @@ def 
test_get_instance(request_type, transport: str = "grpc"): assert response.node_count == 1070 assert response.processing_units == 1743 assert response.state == spanner_instance_admin.Instance.State.CREATING + assert ( + response.instance_type + == spanner_instance_admin.Instance.InstanceType.PROVISIONED + ) assert response.endpoint_uris == ["endpoint_uris_value"] assert response.edition == spanner_instance_admin.Instance.Edition.STANDARD assert ( @@ -4964,6 +4962,7 @@ async def test_get_instance_async( node_count=1070, processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, + instance_type=spanner_instance_admin.Instance.InstanceType.PROVISIONED, endpoint_uris=["endpoint_uris_value"], edition=spanner_instance_admin.Instance.Edition.STANDARD, default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.NONE, @@ -4985,6 +4984,10 @@ async def test_get_instance_async( assert response.node_count == 1070 assert response.processing_units == 1743 assert response.state == spanner_instance_admin.Instance.State.CREATING + assert ( + response.instance_type + == spanner_instance_admin.Instance.InstanceType.PROVISIONED + ) assert response.endpoint_uris == ["endpoint_uris_value"] assert response.edition == spanner_instance_admin.Instance.Edition.STANDARD assert ( @@ -9563,6 +9566,7 @@ def test_list_instance_configs_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_configs(request) @@ -9618,6 +9622,7 @@ def test_list_instance_configs_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_configs(**mock_args) @@ -9818,6 +9823,7 @@ def test_get_instance_config_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance_config(request) @@ -9863,6 +9869,7 @@ def test_get_instance_config_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_instance_config(**mock_args) @@ -10004,6 +10011,7 @@ def test_create_instance_config_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance_config(request) @@ -10058,6 +10066,7 @@ def test_create_instance_config_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance_config(**mock_args) @@ -10192,6 +10201,7 @@ def test_update_instance_config_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance_config(request) @@ -10246,6 +10256,7 @@ def 
test_update_instance_config_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_instance_config(**mock_args) @@ -10387,6 +10398,7 @@ def test_delete_instance_config_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance_config(request) @@ -10438,6 +10450,7 @@ def test_delete_instance_config_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance_config(**mock_args) @@ -10585,6 +10598,7 @@ def test_list_instance_config_operations_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_config_operations(request) @@ -10643,6 +10657,7 @@ def test_list_instance_config_operations_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_config_operations(**mock_args) @@ -10849,6 +10864,7 @@ def test_list_instances_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instances(request) @@ -10904,6 +10920,7 @@ def test_list_instances_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instances(**mock_args) @@ -11111,6 +11128,7 @@ def test_list_instance_partitions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_partitions(request) @@ -11167,6 +11185,7 @@ def test_list_instance_partitions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_partitions(**mock_args) @@ -11366,6 +11385,7 @@ def test_get_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance(request) @@ -11411,6 +11431,7 @@ def test_get_instance_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_instance(**mock_args) @@ -11546,6 +11567,7 @@ def test_create_instance_rest_required_fields( 
response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance(request) @@ -11600,6 +11622,7 @@ def test_create_instance_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance(**mock_args) @@ -11728,6 +11751,7 @@ def test_update_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance(request) @@ -11780,6 +11804,7 @@ def test_update_instance_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_instance(**mock_args) @@ -11908,6 +11933,7 @@ def test_delete_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance(request) @@ -11951,6 +11977,7 @@ def test_delete_instance_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance(**mock_args) @@ -12079,6 +12106,7 @@ def test_set_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) @@ -12130,6 +12158,7 @@ def test_set_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(**mock_args) @@ -12260,6 +12289,7 @@ def test_get_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) @@ -12303,6 +12333,7 @@ def test_get_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(**mock_args) @@ -12441,6 +12472,7 @@ def test_test_iam_permissions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) @@ -12493,6 +12525,7 @@ def test_test_iam_permissions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": 
"value-2"} client.test_iam_permissions(**mock_args) @@ -12630,6 +12663,7 @@ def test_get_instance_partition_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance_partition(request) @@ -12677,6 +12711,7 @@ def test_get_instance_partition_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_instance_partition(**mock_args) @@ -12819,6 +12854,7 @@ def test_create_instance_partition_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance_partition(request) @@ -12875,6 +12911,7 @@ def test_create_instance_partition_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance_partition(**mock_args) @@ -13014,6 +13051,7 @@ def test_delete_instance_partition_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance_partition(request) @@ -13059,6 +13097,7 @@ def test_delete_instance_partition_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance_partition(**mock_args) @@ -13192,6 +13231,7 @@ def test_update_instance_partition_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance_partition(request) @@ -13250,6 +13290,7 @@ def test_update_instance_partition_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_instance_partition(**mock_args) @@ -13402,6 +13443,7 @@ def test_list_instance_partition_operations_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_partition_operations(request) @@ -13463,6 +13505,7 @@ def test_list_instance_partition_operations_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_partition_operations(**mock_args) @@ -13668,6 +13711,7 @@ def test_move_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": 
"value-2"} response = client.move_instance(request) @@ -14337,6 +14381,9 @@ async def test_get_instance_config_empty_call_grpc_asyncio(): leader_options=["leader_options_value"], reconciling=True, state=spanner_instance_admin.InstanceConfig.State.CREATING, + free_instance_availability=spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE, + quorum_type=spanner_instance_admin.InstanceConfig.QuorumType.REGION, + storage_limit_per_processing_unit=3540, ) ) await client.get_instance_config(request=None) @@ -14535,6 +14582,7 @@ async def test_get_instance_empty_call_grpc_asyncio(): node_count=1070, processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, + instance_type=spanner_instance_admin.Instance.InstanceType.PROVISIONED, endpoint_uris=["endpoint_uris_value"], edition=spanner_instance_admin.Instance.Edition.STANDARD, default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.NONE, @@ -14907,6 +14955,7 @@ def test_list_instance_configs_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_configs(request) @@ -14944,6 +14993,7 @@ def test_list_instance_configs_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_configs(request) # Establish that the response is the type that we expect. @@ -14968,10 +15018,14 @@ def test_list_instance_configs_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_list_instance_configs" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_list_instance_configs_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_list_instance_configs" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.ListInstanceConfigsRequest.pb( spanner_instance_admin.ListInstanceConfigsRequest() ) @@ -14984,6 +15038,7 @@ def test_list_instance_configs_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_instance_admin.ListInstanceConfigsResponse.to_json( spanner_instance_admin.ListInstanceConfigsResponse() ) @@ -14996,6 +15051,10 @@ def test_list_instance_configs_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_instance_admin.ListInstanceConfigsResponse() + post_with_metadata.return_value = ( + spanner_instance_admin.ListInstanceConfigsResponse(), + metadata, + ) client.list_instance_configs( request, @@ -15007,6 +15066,7 @@ def test_list_instance_configs_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_instance_config_rest_bad_request( @@ -15030,6 +15090,7 @@ def test_get_instance_config_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} 
client.get_instance_config(request) @@ -15061,6 +15122,9 @@ def test_get_instance_config_rest_call_success(request_type): leader_options=["leader_options_value"], reconciling=True, state=spanner_instance_admin.InstanceConfig.State.CREATING, + free_instance_availability=spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE, + quorum_type=spanner_instance_admin.InstanceConfig.QuorumType.REGION, + storage_limit_per_processing_unit=3540, ) # Wrap the value into a proper Response obj @@ -15072,6 +15136,7 @@ def test_get_instance_config_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance_config(request) # Establish that the response is the type that we expect. @@ -15087,6 +15152,14 @@ def test_get_instance_config_rest_call_success(request_type): assert response.leader_options == ["leader_options_value"] assert response.reconciling is True assert response.state == spanner_instance_admin.InstanceConfig.State.CREATING + assert ( + response.free_instance_availability + == spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE + ) + assert ( + response.quorum_type == spanner_instance_admin.InstanceConfig.QuorumType.REGION + ) + assert response.storage_limit_per_processing_unit == 3540 @pytest.mark.parametrize("null_interceptor", [True, False]) @@ -15106,10 +15179,14 @@ def test_get_instance_config_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_get_instance_config" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_get_instance_config_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_get_instance_config" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.GetInstanceConfigRequest.pb( spanner_instance_admin.GetInstanceConfigRequest() ) @@ -15122,6 +15199,7 @@ def test_get_instance_config_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_instance_admin.InstanceConfig.to_json( spanner_instance_admin.InstanceConfig() ) @@ -15134,6 +15212,10 @@ def test_get_instance_config_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_instance_admin.InstanceConfig() + post_with_metadata.return_value = ( + spanner_instance_admin.InstanceConfig(), + metadata, + ) client.get_instance_config( request, @@ -15145,6 +15227,7 @@ def test_get_instance_config_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_instance_config_rest_bad_request( @@ -15168,6 +15251,7 @@ def test_create_instance_config_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance_config(request) @@ -15198,6 +15282,7 @@ def test_create_instance_config_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = 
json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance_config(request) # Establish that the response is the type that we expect. @@ -15223,10 +15308,14 @@ def test_create_instance_config_rest_interceptors(null_interceptor): ), mock.patch.object( transports.InstanceAdminRestInterceptor, "post_create_instance_config" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_create_instance_config_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_create_instance_config" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.CreateInstanceConfigRequest.pb( spanner_instance_admin.CreateInstanceConfigRequest() ) @@ -15239,6 +15328,7 @@ def test_create_instance_config_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -15249,6 +15339,7 @@ def test_create_instance_config_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_instance_config( request, @@ -15260,6 +15351,7 @@ def test_create_instance_config_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_instance_config_rest_bad_request( @@ -15285,6 +15377,7 @@ def test_update_instance_config_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_instance_config(request) @@ -15317,6 +15410,7 @@ def test_update_instance_config_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance_config(request) # Establish that the response is the type that we expect. 
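A pattern repeated through all of these REST hunks is the added `req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}` line: every mocked response now carries a headers mapping. The diff does not show the consumer, but presumably the REST transport now inspects response headers (for example for client-side debug logging), so a bare mock without `headers` would break. A sketch of the mocked-response shape the tests build, with that rationale treated as an assumption:

```python
# Minimal requests.Response-style stub matching what these tests construct;
# the assumption is that the transport now reads response headers, so the
# mock must provide them.
from unittest import mock

import requests

response_value = mock.Mock(spec=requests.Response)
response_value.status_code = 200
response_value.headers = {"header-1": "value-1", "header-2": "value-2"}
response_value.content = b"{}"  # JSON body for the transport to parse
```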
@@ -15342,10 +15436,14 @@ def test_update_instance_config_rest_interceptors(null_interceptor): ), mock.patch.object( transports.InstanceAdminRestInterceptor, "post_update_instance_config" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_update_instance_config_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_update_instance_config" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.UpdateInstanceConfigRequest.pb( spanner_instance_admin.UpdateInstanceConfigRequest() ) @@ -15358,6 +15456,7 @@ def test_update_instance_config_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -15368,6 +15467,7 @@ def test_update_instance_config_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.update_instance_config( request, @@ -15379,6 +15479,7 @@ def test_update_instance_config_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_instance_config_rest_bad_request( @@ -15402,6 +15503,7 @@ def test_delete_instance_config_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance_config(request) @@ -15432,6 +15534,7 @@ def test_delete_instance_config_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance_config(request) # Establish that the response is the type that we expect. @@ -15468,6 +15571,7 @@ def test_delete_instance_config_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = spanner_instance_admin.DeleteInstanceConfigRequest() metadata = [ @@ -15508,6 +15612,7 @@ def test_list_instance_config_operations_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_config_operations(request) @@ -15545,6 +15650,7 @@ def test_list_instance_config_operations_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_config_operations(request) # Establish that the response is the type that we expect. 
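These interceptor tests now patch a second post hook per RPC, `post_<rpc>_with_metadata`, alongside the existing `post_<rpc>` (delete RPCs, which return an empty body, appear to keep only the pre hook). Judging by the mocked return values, the new hook receives and returns a `(response, metadata)` pair, so user code can observe or rewrite the trailing metadata as well as the response. A sketch of a custom interceptor overriding the hook for `update_instance_config` (an LRO, hence the raw `operations_pb2.Operation`); the signature is inferred from the mocks, not from documentation:

```python
# Assumed hook signature: (response, metadata) in, (response, metadata) out.
from google.longrunning import operations_pb2

from google.cloud.spanner_admin_instance_v1.services.instance_admin import (
    transports,
)


class MetadataAwareInterceptor(transports.InstanceAdminRestInterceptor):
    def post_update_instance_config_with_metadata(self, response, metadata):
        # `response` is the raw operations_pb2.Operation for this LRO; pass
        # both values through unchanged after inspecting them.
        assert isinstance(response, operations_pb2.Operation)
        return response, metadata
```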
@@ -15569,10 +15675,14 @@ def test_list_instance_config_operations_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_list_instance_config_operations" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_list_instance_config_operations_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_list_instance_config_operations" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.ListInstanceConfigOperationsRequest.pb( spanner_instance_admin.ListInstanceConfigOperationsRequest() ) @@ -15585,6 +15695,7 @@ def test_list_instance_config_operations_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = ( spanner_instance_admin.ListInstanceConfigOperationsResponse.to_json( spanner_instance_admin.ListInstanceConfigOperationsResponse() @@ -15601,6 +15712,10 @@ def test_list_instance_config_operations_rest_interceptors(null_interceptor): post.return_value = ( spanner_instance_admin.ListInstanceConfigOperationsResponse() ) + post_with_metadata.return_value = ( + spanner_instance_admin.ListInstanceConfigOperationsResponse(), + metadata, + ) client.list_instance_config_operations( request, @@ -15612,6 +15727,7 @@ def test_list_instance_config_operations_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_instances_rest_bad_request( @@ -15635,6 +15751,7 @@ def test_list_instances_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instances(request) @@ -15671,6 +15788,7 @@ def test_list_instances_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instances(request) # Establish that the response is the type that we expect. 
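Each `*_rest_interceptors` test here is parametrized over `null_interceptor`, running once with a live interceptor attached to the transport and once with none. The construction behind that flag is elided from these hunks as unchanged context; the wiring sketched below follows the usual generated-test pattern, so treat the exact constructor call as an assumption:

```python
# Assumed wiring for the null_interceptor parametrization.
import pytest
from google.auth import credentials as ga_credentials

from google.cloud.spanner_admin_instance_v1 import InstanceAdminClient
from google.cloud.spanner_admin_instance_v1.services.instance_admin import (
    transports,
)


@pytest.mark.parametrize("null_interceptor", [True, False])
def test_interceptor_wiring(null_interceptor):
    transport = transports.InstanceAdminRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
        interceptor=None
        if null_interceptor
        else transports.InstanceAdminRestInterceptor(),
    )
    client = InstanceAdminClient(transport=transport)
    assert client._transport is transport
```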
@@ -15696,10 +15814,13 @@ def test_list_instances_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_list_instances" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_list_instances_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_list_instances" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.ListInstancesRequest.pb( spanner_instance_admin.ListInstancesRequest() ) @@ -15712,6 +15833,7 @@ def test_list_instances_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_instance_admin.ListInstancesResponse.to_json( spanner_instance_admin.ListInstancesResponse() ) @@ -15724,6 +15846,10 @@ def test_list_instances_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_instance_admin.ListInstancesResponse() + post_with_metadata.return_value = ( + spanner_instance_admin.ListInstancesResponse(), + metadata, + ) client.list_instances( request, @@ -15735,6 +15861,7 @@ def test_list_instances_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_instance_partitions_rest_bad_request( @@ -15758,6 +15885,7 @@ def test_list_instance_partitions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_partitions(request) @@ -15796,6 +15924,7 @@ def test_list_instance_partitions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_partitions(request) # Establish that the response is the type that we expect. 
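The call-success tests fake the wire format by serializing a message to JSON and handing the encoded bytes to the mocked session, while the interceptor tests use the proto-plus `to_json` classmethod for the same purpose. A minimal round-trip of that encoding step; the `.pb()` conversion is the usual proto-plus-to-protobuf step, assumed here since these hunks elide it as context:

```python
# Faking a REST body: proto-plus message -> protobuf message -> JSON -> bytes.
from google.protobuf import json_format

from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin

return_value = spanner_instance_admin.ListInstancePartitionsResponse(
    next_page_token="abc"
)
pb_value = spanner_instance_admin.ListInstancePartitionsResponse.pb(return_value)
json_return_value = json_format.MessageToJson(pb_value)
content = json_return_value.encode("UTF-8")  # what the mocked response serves
```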
@@ -15821,10 +15950,14 @@ def test_list_instance_partitions_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_list_instance_partitions" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_list_instance_partitions_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_list_instance_partitions" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.ListInstancePartitionsRequest.pb( spanner_instance_admin.ListInstancePartitionsRequest() ) @@ -15837,6 +15970,7 @@ def test_list_instance_partitions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_instance_admin.ListInstancePartitionsResponse.to_json( spanner_instance_admin.ListInstancePartitionsResponse() ) @@ -15849,6 +15983,10 @@ def test_list_instance_partitions_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_instance_admin.ListInstancePartitionsResponse() + post_with_metadata.return_value = ( + spanner_instance_admin.ListInstancePartitionsResponse(), + metadata, + ) client.list_instance_partitions( request, @@ -15860,6 +15998,7 @@ def test_list_instance_partitions_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_instance_rest_bad_request( @@ -15883,6 +16022,7 @@ def test_get_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_instance(request) @@ -15912,6 +16052,7 @@ def test_get_instance_rest_call_success(request_type): node_count=1070, processing_units=1743, state=spanner_instance_admin.Instance.State.CREATING, + instance_type=spanner_instance_admin.Instance.InstanceType.PROVISIONED, endpoint_uris=["endpoint_uris_value"], edition=spanner_instance_admin.Instance.Edition.STANDARD, default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.NONE, @@ -15926,6 +16067,7 @@ def test_get_instance_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance(request) # Establish that the response is the type that we expect. 
@@ -15936,6 +16078,10 @@ def test_get_instance_rest_call_success(request_type): assert response.node_count == 1070 assert response.processing_units == 1743 assert response.state == spanner_instance_admin.Instance.State.CREATING + assert ( + response.instance_type + == spanner_instance_admin.Instance.InstanceType.PROVISIONED + ) assert response.endpoint_uris == ["endpoint_uris_value"] assert response.edition == spanner_instance_admin.Instance.Edition.STANDARD assert ( @@ -15961,10 +16107,13 @@ def test_get_instance_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_get_instance" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_get_instance_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_get_instance" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.GetInstanceRequest.pb( spanner_instance_admin.GetInstanceRequest() ) @@ -15977,6 +16126,7 @@ def test_get_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_instance_admin.Instance.to_json( spanner_instance_admin.Instance() ) @@ -15989,6 +16139,7 @@ def test_get_instance_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_instance_admin.Instance() + post_with_metadata.return_value = spanner_instance_admin.Instance(), metadata client.get_instance( request, @@ -16000,6 +16151,7 @@ def test_get_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_instance_rest_bad_request( @@ -16023,6 +16175,7 @@ def test_create_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance(request) @@ -16053,6 +16206,7 @@ def test_create_instance_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance(request) # Establish that the response is the type that we expect. 
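The assertions above cover the new `Instance.instance_type` field, and earlier hunks do the same for `InstanceConfig`'s new `free_instance_availability`, `quorum_type`, and `storage_limit_per_processing_unit` fields. Both messages constructed locally, using exactly the field and enum names the diff asserts:

```python
# Field and enum names taken verbatim from the assertions in this diff.
from google.cloud.spanner_admin_instance_v1.types import spanner_instance_admin

instance = spanner_instance_admin.Instance(
    name="name_value",
    node_count=1070,
    processing_units=1743,
    state=spanner_instance_admin.Instance.State.CREATING,
    instance_type=spanner_instance_admin.Instance.InstanceType.PROVISIONED,
    edition=spanner_instance_admin.Instance.Edition.STANDARD,
    default_backup_schedule_type=(
        spanner_instance_admin.Instance.DefaultBackupScheduleType.NONE
    ),
)

config = spanner_instance_admin.InstanceConfig(
    state=spanner_instance_admin.InstanceConfig.State.CREATING,
    free_instance_availability=(
        spanner_instance_admin.InstanceConfig.FreeInstanceAvailability.AVAILABLE
    ),
    quorum_type=spanner_instance_admin.InstanceConfig.QuorumType.REGION,
    storage_limit_per_processing_unit=3540,
)

assert (
    instance.instance_type
    == spanner_instance_admin.Instance.InstanceType.PROVISIONED
)
assert config.storage_limit_per_processing_unit == 3540
```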
@@ -16078,10 +16232,13 @@ def test_create_instance_rest_interceptors(null_interceptor): ), mock.patch.object( transports.InstanceAdminRestInterceptor, "post_create_instance" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_create_instance_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_create_instance" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.CreateInstanceRequest.pb( spanner_instance_admin.CreateInstanceRequest() ) @@ -16094,6 +16251,7 @@ def test_create_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -16104,6 +16262,7 @@ def test_create_instance_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_instance( request, @@ -16115,6 +16274,7 @@ def test_create_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_update_instance_rest_bad_request( @@ -16138,6 +16298,7 @@ def test_update_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_instance(request) @@ -16168,6 +16329,7 @@ def test_update_instance_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance(request) # Establish that the response is the type that we expect. 
@@ -16193,10 +16355,13 @@ def test_update_instance_rest_interceptors(null_interceptor): ), mock.patch.object( transports.InstanceAdminRestInterceptor, "post_update_instance" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_update_instance_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_update_instance" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.UpdateInstanceRequest.pb( spanner_instance_admin.UpdateInstanceRequest() ) @@ -16209,6 +16374,7 @@ def test_update_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -16219,6 +16385,7 @@ def test_update_instance_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.update_instance( request, @@ -16230,6 +16397,7 @@ def test_update_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_instance_rest_bad_request( @@ -16253,6 +16421,7 @@ def test_delete_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance(request) @@ -16283,6 +16452,7 @@ def test_delete_instance_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance(request) # Establish that the response is the type that we expect. @@ -16319,6 +16489,7 @@ def test_delete_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = spanner_instance_admin.DeleteInstanceRequest() metadata = [ @@ -16359,6 +16530,7 @@ def test_set_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(request) @@ -16392,6 +16564,7 @@ def test_set_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) # Establish that the response is the type that we expect. 
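The surrounding hunks also touch the IAM surface (`set_iam_policy`, `get_iam_policy`, `test_iam_permissions`), which speaks raw `iam_policy_pb2`/`policy_pb2` protobufs rather than proto-plus types. A minimal request built the same way; the resource name below is a placeholder, not taken from the diff:

```python
# The IAM methods take plain protobuf requests; resource name is hypothetical.
from google.iam.v1 import iam_policy_pb2, policy_pb2

request = iam_policy_pb2.SetIamPolicyRequest(
    resource="projects/my-project/instances/my-instance",
    policy=policy_pb2.Policy(version=3),
)
```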
@@ -16417,10 +16590,13 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_set_iam_policy" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_set_iam_policy_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_set_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.SetIamPolicyRequest() transcode.return_value = { "method": "post", @@ -16431,6 +16607,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -16441,6 +16618,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata client.set_iam_policy( request, @@ -16452,6 +16630,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_iam_policy_rest_bad_request( @@ -16475,6 +16654,7 @@ def test_get_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(request) @@ -16508,6 +16688,7 @@ def test_get_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) # Establish that the response is the type that we expect. 
@@ -16533,10 +16714,13 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_get_iam_policy" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_get_iam_policy_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_get_iam_policy" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.GetIamPolicyRequest() transcode.return_value = { "method": "post", @@ -16547,6 +16731,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -16557,6 +16742,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = policy_pb2.Policy() + post_with_metadata.return_value = policy_pb2.Policy(), metadata client.get_iam_policy( request, @@ -16568,6 +16754,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_test_iam_permissions_rest_bad_request( @@ -16591,6 +16778,7 @@ def test_test_iam_permissions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(request) @@ -16623,6 +16811,7 @@ def test_test_iam_permissions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. 
@@ -16647,10 +16836,14 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_test_iam_permissions" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_test_iam_permissions_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_test_iam_permissions" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = iam_policy_pb2.TestIamPermissionsRequest() transcode.return_value = { "method": "post", @@ -16661,6 +16854,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson( iam_policy_pb2.TestIamPermissionsResponse() ) @@ -16673,6 +16867,10 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = iam_policy_pb2.TestIamPermissionsResponse() + post_with_metadata.return_value = ( + iam_policy_pb2.TestIamPermissionsResponse(), + metadata, + ) client.test_iam_permissions( request, @@ -16684,6 +16882,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_instance_partition_rest_bad_request( @@ -16709,6 +16908,7 @@ def test_get_instance_partition_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_instance_partition(request) @@ -16753,6 +16953,7 @@ def test_get_instance_partition_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance_partition(request) # Establish that the response is the type that we expect. 
@@ -16783,10 +16984,14 @@ def test_get_instance_partition_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.InstanceAdminRestInterceptor, "post_get_instance_partition" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_get_instance_partition_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_get_instance_partition" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.GetInstancePartitionRequest.pb( spanner_instance_admin.GetInstancePartitionRequest() ) @@ -16799,6 +17004,7 @@ def test_get_instance_partition_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner_instance_admin.InstancePartition.to_json( spanner_instance_admin.InstancePartition() ) @@ -16811,6 +17017,10 @@ def test_get_instance_partition_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner_instance_admin.InstancePartition() + post_with_metadata.return_value = ( + spanner_instance_admin.InstancePartition(), + metadata, + ) client.get_instance_partition( request, @@ -16822,6 +17032,7 @@ def test_get_instance_partition_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_create_instance_partition_rest_bad_request( @@ -16845,6 +17056,7 @@ def test_create_instance_partition_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance_partition(request) @@ -16875,6 +17087,7 @@ def test_create_instance_partition_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance_partition(request) # Establish that the response is the type that we expect. 
@@ -16900,10 +17113,14 @@ def test_create_instance_partition_rest_interceptors(null_interceptor): ), mock.patch.object( transports.InstanceAdminRestInterceptor, "post_create_instance_partition" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_create_instance_partition_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_create_instance_partition" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.CreateInstancePartitionRequest.pb( spanner_instance_admin.CreateInstancePartitionRequest() ) @@ -16916,6 +17133,7 @@ def test_create_instance_partition_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -16926,6 +17144,7 @@ def test_create_instance_partition_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.create_instance_partition( request, @@ -16937,6 +17156,7 @@ def test_create_instance_partition_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_instance_partition_rest_bad_request( @@ -16962,6 +17182,7 @@ def test_delete_instance_partition_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance_partition(request) @@ -16994,6 +17215,7 @@ def test_delete_instance_partition_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance_partition(request) # Establish that the response is the type that we expect. @@ -17030,6 +17252,7 @@ def test_delete_instance_partition_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = spanner_instance_admin.DeleteInstancePartitionRequest() metadata = [ @@ -17074,6 +17297,7 @@ def test_update_instance_partition_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_instance_partition(request) @@ -17108,6 +17332,7 @@ def test_update_instance_partition_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance_partition(request) # Establish that the response is the type that we expect. 
@@ -17133,10 +17358,14 @@ def test_update_instance_partition_rest_interceptors(null_interceptor): ), mock.patch.object( transports.InstanceAdminRestInterceptor, "post_update_instance_partition" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_update_instance_partition_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_update_instance_partition" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.UpdateInstancePartitionRequest.pb( spanner_instance_admin.UpdateInstancePartitionRequest() ) @@ -17149,6 +17378,7 @@ def test_update_instance_partition_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -17159,6 +17389,7 @@ def test_update_instance_partition_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.update_instance_partition( request, @@ -17170,6 +17401,7 @@ def test_update_instance_partition_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_instance_partition_operations_rest_bad_request( @@ -17193,6 +17425,7 @@ def test_list_instance_partition_operations_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instance_partition_operations(request) @@ -17233,6 +17466,7 @@ def test_list_instance_partition_operations_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instance_partition_operations(request) # Establish that the response is the type that we expect. 
@@ -17261,11 +17495,15 @@ def test_list_instance_partition_operations_rest_interceptors(null_interceptor): transports.InstanceAdminRestInterceptor, "post_list_instance_partition_operations", ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, + "post_list_instance_partition_operations_with_metadata", + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_list_instance_partition_operations", ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.ListInstancePartitionOperationsRequest.pb( spanner_instance_admin.ListInstancePartitionOperationsRequest() ) @@ -17278,6 +17516,7 @@ def test_list_instance_partition_operations_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = ( spanner_instance_admin.ListInstancePartitionOperationsResponse.to_json( spanner_instance_admin.ListInstancePartitionOperationsResponse() @@ -17294,6 +17533,10 @@ def test_list_instance_partition_operations_rest_interceptors(null_interceptor): post.return_value = ( spanner_instance_admin.ListInstancePartitionOperationsResponse() ) + post_with_metadata.return_value = ( + spanner_instance_admin.ListInstancePartitionOperationsResponse(), + metadata, + ) client.list_instance_partition_operations( request, @@ -17305,6 +17548,7 @@ def test_list_instance_partition_operations_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_move_instance_rest_bad_request( @@ -17328,6 +17572,7 @@ def test_move_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.move_instance(request) @@ -17358,6 +17603,7 @@ def test_move_instance_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.move_instance(request) # Establish that the response is the type that we expect. 
@@ -17383,10 +17629,13 @@ def test_move_instance_rest_interceptors(null_interceptor): ), mock.patch.object( transports.InstanceAdminRestInterceptor, "post_move_instance" ) as post, mock.patch.object( + transports.InstanceAdminRestInterceptor, "post_move_instance_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.InstanceAdminRestInterceptor, "pre_move_instance" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner_instance_admin.MoveInstanceRequest.pb( spanner_instance_admin.MoveInstanceRequest() ) @@ -17399,6 +17648,7 @@ def test_move_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -17409,6 +17659,7 @@ def test_move_instance_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = operations_pb2.Operation() + post_with_metadata.return_value = operations_pb2.Operation(), metadata client.move_instance( request, @@ -17420,6 +17671,7 @@ def test_move_instance_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_initialize_client_w_rest(): diff --git a/tests/unit/gapic/spanner_v1/test_spanner.py b/tests/unit/gapic/spanner_v1/test_spanner.py index a1da7983a0..a1227d4861 100644 --- a/tests/unit/gapic/spanner_v1/test_spanner.py +++ b/tests/unit/gapic/spanner_v1/test_spanner.py @@ -72,6 +72,14 @@ import google.auth +CRED_INFO_JSON = { + "credential_source": "/path/to/file", + "credential_type": "service account credentials", + "principal": "service-account@example.com", +} +CRED_INFO_STRING = json.dumps(CRED_INFO_JSON) + + async def mock_async_gen(data, chunk_size=1): for i in range(0, len(data)): # pragma: NO COVER chunk = data[i : i + chunk_size] @@ -295,83 +303,46 @@ def test__get_universe_domain(): @pytest.mark.parametrize( - "client_class,transport_class,transport_name", + "error_code,cred_info_json,show_cred_info", [ - (SpannerClient, transports.SpannerGrpcTransport, "grpc"), - (SpannerClient, transports.SpannerRestTransport, "rest"), + (401, CRED_INFO_JSON, True), + (403, CRED_INFO_JSON, True), + (404, CRED_INFO_JSON, True), + (500, CRED_INFO_JSON, False), + (401, None, False), + (403, None, False), + (404, None, False), + (500, None, False), ], ) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class(credentials=ga_credentials.AnonymousCredentials()) - ) - assert client._validate_universe_domain() == True +def test__add_cred_info_for_auth_errors(error_code, cred_info_json, show_cred_info): + cred = mock.Mock(["get_cred_info"]) + cred.get_cred_info = mock.Mock(return_value=cred_info_json) + client = SpannerClient(credentials=cred) + client._transport._credentials = cred + + error = core_exceptions.GoogleAPICallError("message", details=["foo"]) + error.code = error_code + + client._add_cred_info_for_auth_errors(error) + if show_cred_info: + assert error.details == ["foo", CRED_INFO_STRING] + else: + assert error.details == ["foo"] - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. 
The default universes in both match. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True +@pytest.mark.parametrize("error_code", [401, 403, 404, 500]) +def test__add_cred_info_for_auth_errors_no_get_cred_info(error_code): + cred = mock.Mock([]) + assert not hasattr(cred, "get_cred_info") + client = SpannerClient(credentials=cred) + client._transport._credentials = cred - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. - channel = grpc.secure_channel( - "http://localhost/", grpc.local_channel_credentials() - ) - transport = transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True + error = core_exceptions.GoogleAPICallError("message", details=[]) + error.code = error_code - # TODO: This is needed to cater for older versions of google-auth - # Make this test unconditional once the minimum supported version of - # google-auth becomes 2.23.0 or higher. - google_auth_major, google_auth_minor = [ - int(part) for part in google.auth.__version__.split(".")[0:2] - ] - if google_auth_major > 2 or (google_auth_major == 2 and google_auth_minor >= 23): - credentials = ga_credentials.AnonymousCredentials() - credentials._universe_domain = "foo.com" - # Test the case when there is a universe mismatch from the credentials. - client = client_class(transport=transport_class(credentials=credentials)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - ) - - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. - api_core_major, api_core_minor = [ - int(part) for part in api_core_version.__version__.split(".")[0:2] - ] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class( - client_options={"universe_domain": "bar.com"}, - transport=transport_class( - credentials=ga_credentials.AnonymousCredentials(), - ), - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert ( - str(excinfo.value) - == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
- ) - - # Test that ValueError is raised if universe_domain is provided via client options and credentials is None - with pytest.raises(ValueError): - client._compare_universes("foo.bar", None) + client._add_cred_info_for_auth_errors(error) + assert error.details == [] @pytest.mark.parametrize( @@ -514,6 +485,7 @@ def test_spanner_client_client_options(client_class, transport_class, transport_ client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -534,6 +506,7 @@ def test_spanner_client_client_options(client_class, transport_class, transport_ client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is @@ -552,6 +525,7 @@ def test_spanner_client_client_options(client_class, transport_class, transport_ client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has @@ -592,6 +566,7 @@ def test_spanner_client_client_options(client_class, transport_class, transport_ client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) # Check the case api_endpoint is provided options = client_options.ClientOptions( @@ -612,6 +587,7 @@ def test_spanner_client_client_options(client_class, transport_class, transport_ client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience="https://language.googleapis.com", + metrics_interceptor=mock.ANY, ) @@ -684,6 +660,7 @@ def test_spanner_client_mtls_env_auto( client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) # Check the case ADC client cert is provided. Whether client cert is used depends on @@ -721,6 +698,7 @@ def test_spanner_client_mtls_env_auto( client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) # Check the case client_cert_source and ADC client cert are not provided. @@ -746,6 +724,7 @@ def test_spanner_client_mtls_env_auto( client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) @@ -961,6 +940,7 @@ def test_spanner_client_client_options_scopes( client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) @@ -998,6 +978,7 @@ def test_spanner_client_client_options_credentials_file( client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) @@ -1017,6 +998,7 @@ def test_spanner_client_client_options_from_dict(): client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) @@ -1053,6 +1035,7 @@ def test_spanner_client_create_channel_credentials_file( client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) # test that the credentials from file are saved and used as the credentials. 
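Two things happen in the hunks above. First, every expected-kwargs assertion for the transport constructor gains metrics_interceptor=mock.ANY: mock.ANY compares equal to anything, so these assertions only require that the new argument is passed, without pinning its value. Second, the deleted test__validate_universe_domain cases are replaced by tests for _add_cred_info_for_auth_errors. A sketch of the contract those tests encode, reconstructed from the assertions rather than copied from the implementation:

    import json

    # The error codes the tests mark with show_cred_info=True.
    _AUTH_ERROR_CODES = (401, 403, 404)


    def add_cred_info_for_auth_errors(client, error):
        # Annotate only auth-flavored errors, and only when the credentials
        # object can describe itself via get_cred_info(); a 500, or
        # credentials without that method, leaves error.details untouched.
        if error.code not in _AUTH_ERROR_CODES:
            return
        credentials = client._transport._credentials
        get_cred_info = getattr(credentials, "get_cred_info", None)
        if get_cred_info is None:
            return
        cred_info = get_cred_info()
        if cred_info is not None:
            # Appended as a JSON string, matching CRED_INFO_STRING above.
            error.details.append(json.dumps(cred_info))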
@@ -6155,6 +6138,7 @@ def test_create_session_rest_required_fields(request_type=spanner.CreateSessionR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_session(request) @@ -6210,6 +6194,7 @@ def test_create_session_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_session(**mock_args) @@ -6351,6 +6336,7 @@ def test_batch_create_sessions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.batch_create_sessions(request) @@ -6407,6 +6393,7 @@ def test_batch_create_sessions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.batch_create_sessions(**mock_args) @@ -6537,6 +6524,7 @@ def test_get_session_rest_required_fields(request_type=spanner.GetSessionRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_session(request) @@ -6584,6 +6572,7 @@ def test_get_session_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_session(**mock_args) @@ -6721,6 +6710,7 @@ def test_list_sessions_rest_required_fields(request_type=spanner.ListSessionsReq response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_sessions(request) @@ -6777,6 +6767,7 @@ def test_list_sessions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_sessions(**mock_args) @@ -6966,6 +6957,7 @@ def test_delete_session_rest_required_fields(request_type=spanner.DeleteSessionR response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_session(request) @@ -7011,6 +7003,7 @@ def test_delete_session_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_session(**mock_args) @@ -7145,6 +7138,7 @@ def test_execute_sql_rest_required_fields(request_type=spanner.ExecuteSqlRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.execute_sql(request) @@ -7283,6 +7277,7 @@ def test_execute_streaming_sql_rest_required_fields( 
response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -7419,6 +7414,7 @@ def test_execute_batch_dml_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.execute_batch_dml(request) @@ -7555,6 +7551,7 @@ def test_read_rest_required_fields(request_type=spanner.ReadRequest): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.read(request) @@ -7692,6 +7689,7 @@ def test_streaming_read_rest_required_fields(request_type=spanner.ReadRequest): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -7826,6 +7824,7 @@ def test_begin_transaction_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.begin_transaction(request) @@ -7886,6 +7885,7 @@ def test_begin_transaction_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.begin_transaction(**mock_args) @@ -8021,6 +8021,7 @@ def test_commit_rest_required_fields(request_type=spanner.CommitRequest): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.commit(request) @@ -8071,6 +8072,7 @@ def test_commit_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.commit(**mock_args) @@ -8211,6 +8213,7 @@ def test_rollback_rest_required_fields(request_type=spanner.RollbackRequest): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.rollback(request) @@ -8265,6 +8268,7 @@ def test_rollback_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.rollback(**mock_args) @@ -8402,6 +8406,7 @@ def test_partition_query_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.partition_query(request) @@ -8532,6 +8537,7 @@ def test_partition_read_rest_required_fields(request_type=spanner.PartitionReadR response_value._content = json_return_value.encode("UTF-8") req.return_value = 
response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.partition_read(request) @@ -8660,6 +8666,7 @@ def test_batch_write_rest_required_fields(request_type=spanner.BatchWriteRequest response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -8727,6 +8734,7 @@ def test_batch_write_rest_flattened(): json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} with mock.patch.object(response_value, "iter_content") as iter_content: iter_content.return_value = iter(json_return_value) @@ -9676,6 +9684,7 @@ def test_create_session_rest_bad_request(request_type=spanner.CreateSessionReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_session(request) @@ -9713,6 +9722,7 @@ def test_create_session_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_session(request) # Establish that the response is the type that we expect. @@ -9737,10 +9747,13 @@ def test_create_session_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_create_session" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_create_session_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_create_session" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.CreateSessionRequest.pb(spanner.CreateSessionRequest()) transcode.return_value = { "method": "post", @@ -9751,6 +9764,7 @@ def test_create_session_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.Session.to_json(spanner.Session()) req.return_value.content = return_value @@ -9761,6 +9775,7 @@ def test_create_session_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.Session() + post_with_metadata.return_value = spanner.Session(), metadata client.create_session( request, @@ -9772,6 +9787,7 @@ def test_create_session_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_batch_create_sessions_rest_bad_request( @@ -9795,6 +9811,7 @@ def test_batch_create_sessions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.batch_create_sessions(request) @@ -9828,6 +9845,7 @@ def test_batch_create_sessions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = 
json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.batch_create_sessions(request) # Establish that the response is the type that we expect. @@ -9849,10 +9867,13 @@ def test_batch_create_sessions_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_batch_create_sessions" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_batch_create_sessions_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_batch_create_sessions" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.BatchCreateSessionsRequest.pb( spanner.BatchCreateSessionsRequest() ) @@ -9865,6 +9886,7 @@ def test_batch_create_sessions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.BatchCreateSessionsResponse.to_json( spanner.BatchCreateSessionsResponse() ) @@ -9877,6 +9899,10 @@ def test_batch_create_sessions_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.BatchCreateSessionsResponse() + post_with_metadata.return_value = ( + spanner.BatchCreateSessionsResponse(), + metadata, + ) client.batch_create_sessions( request, @@ -9888,6 +9914,7 @@ def test_batch_create_sessions_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_get_session_rest_bad_request(request_type=spanner.GetSessionRequest): @@ -9911,6 +9938,7 @@ def test_get_session_rest_bad_request(request_type=spanner.GetSessionRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_session(request) @@ -9950,6 +9978,7 @@ def test_get_session_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_session(request) # Establish that the response is the type that we expect. 
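The many *_rest_interceptors tests that follow all share one template, now extended with the *_with_metadata patch target. A schematic condensation of that template, using get_session as the example; the patch targets and stub shapes mirror the surrounding hunks, while the wiring is abbreviated:

    from unittest import mock

    from google.api_core import path_template
    from google.auth import credentials as ga_credentials
    from google.cloud.spanner_v1.services.spanner import SpannerClient, transports
    from google.cloud.spanner_v1.types import spanner


    def run_interceptor_template():
        transport = transports.SpannerRestTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            interceptor=transports.SpannerRestInterceptor(),
        )
        client = SpannerClient(transport=transport)
        with mock.patch.object(
            type(client.transport._session), "request"
        ) as req, mock.patch.object(
            path_template, "transcode"
        ) as transcode, mock.patch.object(
            transports.SpannerRestInterceptor, "post_get_session"
        ) as post, mock.patch.object(
            transports.SpannerRestInterceptor, "post_get_session_with_metadata"
        ) as post_with_metadata, mock.patch.object(
            transports.SpannerRestInterceptor, "pre_get_session"
        ) as pre:
            # Bypass URL transcoding so the request never leaves the mocks.
            pb_message = spanner.GetSessionRequest.pb(spanner.GetSessionRequest())
            transcode.return_value = {
                "method": "post",
                "uri": "my_uri",
                "body": pb_message,
                "query_params": pb_message,
            }
            # Stub a successful HTTP response, headers included.
            req.return_value = mock.Mock()
            req.return_value.status_code = 200
            req.return_value.headers = {"header-1": "value-1"}
            req.return_value.content = spanner.Session.to_json(spanner.Session())

            request = spanner.GetSessionRequest()
            metadata = [("key", "val")]
            pre.return_value = request, metadata
            post.return_value = spanner.Session()
            # The new hook hands back the response *and* the metadata.
            post_with_metadata.return_value = spanner.Session(), metadata

            client.get_session(request, metadata=metadata)

            pre.assert_called_once()
            post.assert_called_once()
            post_with_metadata.assert_called_once()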
@@ -9974,10 +10003,13 @@ def test_get_session_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_get_session" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_get_session_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_get_session" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.GetSessionRequest.pb(spanner.GetSessionRequest()) transcode.return_value = { "method": "post", @@ -9988,6 +10020,7 @@ def test_get_session_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.Session.to_json(spanner.Session()) req.return_value.content = return_value @@ -9998,6 +10031,7 @@ def test_get_session_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.Session() + post_with_metadata.return_value = spanner.Session(), metadata client.get_session( request, @@ -10009,6 +10043,7 @@ def test_get_session_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_list_sessions_rest_bad_request(request_type=spanner.ListSessionsRequest): @@ -10030,6 +10065,7 @@ def test_list_sessions_rest_bad_request(request_type=spanner.ListSessionsRequest response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_sessions(request) @@ -10065,6 +10101,7 @@ def test_list_sessions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_sessions(request) # Establish that the response is the type that we expect. 
@@ -10087,10 +10124,13 @@ def test_list_sessions_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_list_sessions" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_list_sessions_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_list_sessions" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.ListSessionsRequest.pb(spanner.ListSessionsRequest()) transcode.return_value = { "method": "post", @@ -10101,6 +10141,7 @@ def test_list_sessions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.ListSessionsResponse.to_json( spanner.ListSessionsResponse() ) @@ -10113,6 +10154,7 @@ def test_list_sessions_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.ListSessionsResponse() + post_with_metadata.return_value = spanner.ListSessionsResponse(), metadata client.list_sessions( request, @@ -10124,6 +10166,7 @@ def test_list_sessions_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_delete_session_rest_bad_request(request_type=spanner.DeleteSessionRequest): @@ -10147,6 +10190,7 @@ def test_delete_session_rest_bad_request(request_type=spanner.DeleteSessionReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_session(request) @@ -10179,6 +10223,7 @@ def test_delete_session_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_session(request) # Establish that the response is the type that we expect. @@ -10211,6 +10256,7 @@ def test_delete_session_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = spanner.DeleteSessionRequest() metadata = [ @@ -10251,6 +10297,7 @@ def test_execute_sql_rest_bad_request(request_type=spanner.ExecuteSqlRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.execute_sql(request) @@ -10286,6 +10333,7 @@ def test_execute_sql_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.execute_sql(request) # Establish that the response is the type that we expect. 
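Note that the delete_session interceptor hunk above (and the rollback one later) only gains the headers stub, with no post or post_with_metadata patches. That appears to be because RPCs returning Empty have no response-side hooks in the generated interceptor, only the request-side one; a sketch of the only override available there, assuming that reading is right:

    from google.cloud.spanner_v1.services.spanner import transports


    class PreOnlyInterceptor(transports.SpannerRestInterceptor):
        def pre_delete_session(self, request, metadata):
            # delete_session returns Empty, so there is nothing for a post
            # hook to transform; only the request and its metadata can be
            # inspected or rewritten here, returned as a tuple.
            return request, metadata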
@@ -10307,10 +10355,13 @@ def test_execute_sql_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_execute_sql" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_execute_sql_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_execute_sql" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.ExecuteSqlRequest.pb(spanner.ExecuteSqlRequest()) transcode.return_value = { "method": "post", @@ -10321,6 +10372,7 @@ def test_execute_sql_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = result_set.ResultSet.to_json(result_set.ResultSet()) req.return_value.content = return_value @@ -10331,6 +10383,7 @@ def test_execute_sql_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = result_set.ResultSet() + post_with_metadata.return_value = result_set.ResultSet(), metadata client.execute_sql( request, @@ -10342,6 +10395,7 @@ def test_execute_sql_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_execute_streaming_sql_rest_bad_request(request_type=spanner.ExecuteSqlRequest): @@ -10365,6 +10419,7 @@ def test_execute_streaming_sql_rest_bad_request(request_type=spanner.ExecuteSqlR response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.execute_streaming_sql(request) @@ -10404,6 +10459,7 @@ def test_execute_streaming_sql_rest_call_success(request_type): json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.execute_streaming_sql(request) assert isinstance(response, Iterable) @@ -10430,10 +10486,13 @@ def test_execute_streaming_sql_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_execute_streaming_sql" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_execute_streaming_sql_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_execute_streaming_sql" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.ExecuteSqlRequest.pb(spanner.ExecuteSqlRequest()) transcode.return_value = { "method": "post", @@ -10444,6 +10503,7 @@ def test_execute_streaming_sql_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = result_set.PartialResultSet.to_json( result_set.PartialResultSet() ) @@ -10456,6 +10516,7 @@ def test_execute_streaming_sql_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = result_set.PartialResultSet() + post_with_metadata.return_value = result_set.PartialResultSet(), metadata client.execute_streaming_sql( request, @@ -10467,6 +10528,7 @@ def test_execute_streaming_sql_rest_interceptors(null_interceptor): pre.assert_called_once() 
post.assert_called_once() + post_with_metadata.assert_called_once() def test_execute_batch_dml_rest_bad_request( @@ -10492,6 +10554,7 @@ def test_execute_batch_dml_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.execute_batch_dml(request) @@ -10527,6 +10590,7 @@ def test_execute_batch_dml_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.execute_batch_dml(request) # Establish that the response is the type that we expect. @@ -10548,10 +10612,13 @@ def test_execute_batch_dml_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_execute_batch_dml" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_execute_batch_dml_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_execute_batch_dml" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.ExecuteBatchDmlRequest.pb(spanner.ExecuteBatchDmlRequest()) transcode.return_value = { "method": "post", @@ -10562,6 +10629,7 @@ def test_execute_batch_dml_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.ExecuteBatchDmlResponse.to_json( spanner.ExecuteBatchDmlResponse() ) @@ -10574,6 +10642,7 @@ def test_execute_batch_dml_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.ExecuteBatchDmlResponse() + post_with_metadata.return_value = spanner.ExecuteBatchDmlResponse(), metadata client.execute_batch_dml( request, @@ -10585,6 +10654,7 @@ def test_execute_batch_dml_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_read_rest_bad_request(request_type=spanner.ReadRequest): @@ -10608,6 +10678,7 @@ def test_read_rest_bad_request(request_type=spanner.ReadRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.read(request) @@ -10643,6 +10714,7 @@ def test_read_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.read(request) # Establish that the response is the type that we expect. 
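The server-streaming RPCs, as in test_execute_streaming_sql_rest_call_success above and test_streaming_read_rest_call_success below (batch_write follows the same shape later), stub the mocked response differently from the unary calls: the payload goes through iter_content rather than content, because the REST transport consumes streaming responses incrementally. The headers stub is still required. Condensed from the surrounding hunks:

    from unittest import mock

    from google.cloud.spanner_v1.types import result_set

    response_value = mock.Mock()
    response_value.status_code = 200
    response_value.headers = {"header-1": "value-1", "header-2": "value-2"}
    # A one-element JSON array, yielded one character at a time; the
    # transport's stream parser reassembles it into PartialResultSet
    # messages.
    json_return_value = "[{}]".format(
        result_set.PartialResultSet.to_json(result_set.PartialResultSet())
    )
    response_value.iter_content = mock.Mock(return_value=iter(json_return_value))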
@@ -10664,10 +10736,13 @@ def test_read_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_read" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_read_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_read" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.ReadRequest.pb(spanner.ReadRequest()) transcode.return_value = { "method": "post", @@ -10678,6 +10753,7 @@ def test_read_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = result_set.ResultSet.to_json(result_set.ResultSet()) req.return_value.content = return_value @@ -10688,6 +10764,7 @@ def test_read_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = result_set.ResultSet() + post_with_metadata.return_value = result_set.ResultSet(), metadata client.read( request, @@ -10699,6 +10776,7 @@ def test_read_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_streaming_read_rest_bad_request(request_type=spanner.ReadRequest): @@ -10722,6 +10800,7 @@ def test_streaming_read_rest_bad_request(request_type=spanner.ReadRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.streaming_read(request) @@ -10761,6 +10840,7 @@ def test_streaming_read_rest_call_success(request_type): json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.streaming_read(request) assert isinstance(response, Iterable) @@ -10787,10 +10867,13 @@ def test_streaming_read_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_streaming_read" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_streaming_read_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_streaming_read" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.ReadRequest.pb(spanner.ReadRequest()) transcode.return_value = { "method": "post", @@ -10801,6 +10884,7 @@ def test_streaming_read_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = result_set.PartialResultSet.to_json( result_set.PartialResultSet() ) @@ -10813,6 +10897,7 @@ def test_streaming_read_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = result_set.PartialResultSet() + post_with_metadata.return_value = result_set.PartialResultSet(), metadata client.streaming_read( request, @@ -10824,6 +10909,7 @@ def test_streaming_read_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_begin_transaction_rest_bad_request( @@ -10849,6 +10935,7 @@ def test_begin_transaction_rest_bad_request( 
response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.begin_transaction(request) @@ -10886,6 +10973,7 @@ def test_begin_transaction_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.begin_transaction(request) # Establish that the response is the type that we expect. @@ -10908,10 +10996,13 @@ def test_begin_transaction_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_begin_transaction" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_begin_transaction_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_begin_transaction" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.BeginTransactionRequest.pb( spanner.BeginTransactionRequest() ) @@ -10924,6 +11015,7 @@ def test_begin_transaction_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = transaction.Transaction.to_json(transaction.Transaction()) req.return_value.content = return_value @@ -10934,6 +11026,7 @@ def test_begin_transaction_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = transaction.Transaction() + post_with_metadata.return_value = transaction.Transaction(), metadata client.begin_transaction( request, @@ -10945,6 +11038,7 @@ def test_begin_transaction_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_commit_rest_bad_request(request_type=spanner.CommitRequest): @@ -10968,6 +11062,7 @@ def test_commit_rest_bad_request(request_type=spanner.CommitRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.commit(request) @@ -11003,6 +11098,7 @@ def test_commit_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.commit(request) # Establish that the response is the type that we expect. 
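A note on the null_interceptor parametrization used by every interceptor test in this file: each case runs twice, once with interceptor=None and once with an explicit SpannerRestInterceptor(). With None, the transport appears to fall back to a default interceptor instance, which is why patching the hooks on the class, rather than on an instance, covers both runs. Roughly:

    import pytest

    from google.auth import credentials as ga_credentials
    from google.cloud.spanner_v1.services.spanner import SpannerClient, transports


    @pytest.mark.parametrize("null_interceptor", [True, False])
    def test_interceptor_parametrization(null_interceptor):
        transport = transports.SpannerRestTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            interceptor=None
            if null_interceptor
            else transports.SpannerRestInterceptor(),
        )
        client = SpannerClient(transport=transport)
        # mock.patch.object(transports.SpannerRestInterceptor, "post_commit")
        # would intercept calls on either transport constructed above.
        assert client is not None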
@@ -11024,10 +11120,13 @@ def test_commit_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_commit" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_commit_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_commit" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.CommitRequest.pb(spanner.CommitRequest()) transcode.return_value = { "method": "post", @@ -11038,6 +11137,7 @@ def test_commit_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = commit_response.CommitResponse.to_json( commit_response.CommitResponse() ) @@ -11050,6 +11150,7 @@ def test_commit_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = commit_response.CommitResponse() + post_with_metadata.return_value = commit_response.CommitResponse(), metadata client.commit( request, @@ -11061,6 +11162,7 @@ def test_commit_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_rollback_rest_bad_request(request_type=spanner.RollbackRequest): @@ -11084,6 +11186,7 @@ def test_rollback_rest_bad_request(request_type=spanner.RollbackRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.rollback(request) @@ -11116,6 +11219,7 @@ def test_rollback_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.rollback(request) # Establish that the response is the type that we expect. @@ -11148,6 +11252,7 @@ def test_rollback_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = spanner.RollbackRequest() metadata = [ @@ -11188,6 +11293,7 @@ def test_partition_query_rest_bad_request(request_type=spanner.PartitionQueryReq response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.partition_query(request) @@ -11223,6 +11329,7 @@ def test_partition_query_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.partition_query(request) # Establish that the response is the type that we expect. 
@@ -11244,10 +11351,13 @@ def test_partition_query_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_partition_query" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_partition_query_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_partition_query" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.PartitionQueryRequest.pb(spanner.PartitionQueryRequest()) transcode.return_value = { "method": "post", @@ -11258,6 +11368,7 @@ def test_partition_query_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.PartitionResponse.to_json(spanner.PartitionResponse()) req.return_value.content = return_value @@ -11268,6 +11379,7 @@ def test_partition_query_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.PartitionResponse() + post_with_metadata.return_value = spanner.PartitionResponse(), metadata client.partition_query( request, @@ -11279,6 +11391,7 @@ def test_partition_query_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_partition_read_rest_bad_request(request_type=spanner.PartitionReadRequest): @@ -11302,6 +11415,7 @@ def test_partition_read_rest_bad_request(request_type=spanner.PartitionReadReque response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.partition_read(request) @@ -11337,6 +11451,7 @@ def test_partition_read_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.partition_read(request) # Establish that the response is the type that we expect. 
@@ -11358,10 +11473,13 @@ def test_partition_read_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_partition_read" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_partition_read_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_partition_read" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.PartitionReadRequest.pb(spanner.PartitionReadRequest()) transcode.return_value = { "method": "post", @@ -11372,6 +11490,7 @@ def test_partition_read_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.PartitionResponse.to_json(spanner.PartitionResponse()) req.return_value.content = return_value @@ -11382,6 +11501,7 @@ def test_partition_read_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.PartitionResponse() + post_with_metadata.return_value = spanner.PartitionResponse(), metadata client.partition_read( request, @@ -11393,6 +11513,7 @@ def test_partition_read_rest_interceptors(null_interceptor): pre.assert_called_once() post.assert_called_once() + post_with_metadata.assert_called_once() def test_batch_write_rest_bad_request(request_type=spanner.BatchWriteRequest): @@ -11416,6 +11537,7 @@ def test_batch_write_rest_bad_request(request_type=spanner.BatchWriteRequest): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.batch_write(request) @@ -11454,6 +11576,7 @@ def test_batch_write_rest_call_success(request_type): json_return_value = "[{}]".format(json_return_value) response_value.iter_content = mock.Mock(return_value=iter(json_return_value)) req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.batch_write(request) assert isinstance(response, Iterable) @@ -11479,10 +11602,13 @@ def test_batch_write_rest_interceptors(null_interceptor): ) as transcode, mock.patch.object( transports.SpannerRestInterceptor, "post_batch_write" ) as post, mock.patch.object( + transports.SpannerRestInterceptor, "post_batch_write_with_metadata" + ) as post_with_metadata, mock.patch.object( transports.SpannerRestInterceptor, "pre_batch_write" ) as pre: pre.assert_not_called() post.assert_not_called() + post_with_metadata.assert_not_called() pb_message = spanner.BatchWriteRequest.pb(spanner.BatchWriteRequest()) transcode.return_value = { "method": "post", @@ -11493,6 +11619,7 @@ def test_batch_write_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = spanner.BatchWriteResponse.to_json(spanner.BatchWriteResponse()) req.return_value.iter_content = mock.Mock(return_value=iter(return_value)) @@ -11503,6 +11630,7 @@ def test_batch_write_rest_interceptors(null_interceptor): ] pre.return_value = request, metadata post.return_value = spanner.BatchWriteResponse() + post_with_metadata.return_value = spanner.BatchWriteResponse(), metadata client.batch_write( request, @@ -11514,6 +11642,7 @@ def test_batch_write_rest_interceptors(null_interceptor): pre.assert_called_once() 
post.assert_called_once() + post_with_metadata.assert_called_once() def test_initialize_client_w_rest(): @@ -12600,4 +12729,5 @@ def test_api_key_credentials(client_class, transport_class): client_info=transports.base.DEFAULT_CLIENT_INFO, always_use_jwt_access=True, api_audience=None, + metrics_interceptor=mock.ANY, ) diff --git a/tests/unit/spanner_dbapi/test_cursor.py b/tests/unit/spanner_dbapi/test_cursor.py index 3836e1f8e5..2a8cddac9b 100644 --- a/tests/unit/spanner_dbapi/test_cursor.py +++ b/tests/unit/spanner_dbapi/test_cursor.py @@ -148,7 +148,8 @@ def test_do_batch_update(self): ("DELETE FROM table WHERE col1 = @a0", {"a0": 1}, {"a0": INT64}), ("DELETE FROM table WHERE col1 = @a0", {"a0": 2}, {"a0": INT64}), ("DELETE FROM table WHERE col1 = @a0", {"a0": 3}, {"a0": INT64}), - ] + ], + last_statement=True, ) self.assertEqual(cursor._row_count, 3) @@ -539,7 +540,8 @@ def test_executemany_delete_batch_autocommit(self): ("DELETE FROM table WHERE col1 = @a0", {"a0": 1}, {"a0": INT64}), ("DELETE FROM table WHERE col1 = @a0", {"a0": 2}, {"a0": INT64}), ("DELETE FROM table WHERE col1 = @a0", {"a0": 3}, {"a0": INT64}), - ] + ], + last_statement=True, ) def test_executemany_update_batch_autocommit(self): @@ -582,7 +584,8 @@ def test_executemany_update_batch_autocommit(self): {"a0": 3, "a1": "c"}, {"a0": INT64, "a1": STRING}, ), - ] + ], + last_statement=True, ) def test_executemany_insert_batch_non_autocommit(self): @@ -659,7 +662,8 @@ def test_executemany_insert_batch_autocommit(self): {"a0": 5, "a1": 6, "a2": 7, "a3": 8}, {"a0": INT64, "a1": INT64, "a2": INT64, "a3": INT64}, ), - ] + ], + last_statement=True, ) transaction.commit.assert_called_once() diff --git a/tests/unit/test__helpers.py b/tests/unit/test__helpers.py index ecc8018648..bd861cc8eb 100644 --- a/tests/unit/test__helpers.py +++ b/tests/unit/test__helpers.py @@ -15,6 +15,7 @@ import unittest import mock +from google.cloud.spanner_v1 import TransactionOptions class Test_merge_query_options(unittest.TestCase): @@ -955,3 +956,83 @@ def test(self): self.assertEqual( metadata, ("x-goog-spanner-route-to-leader", str(value).lower()) ) + + +class Test_merge_transaction_options(unittest.TestCase): + def _callFUT(self, *args, **kw): + from google.cloud.spanner_v1._helpers import _merge_Transaction_Options + + return _merge_Transaction_Options(*args, **kw) + + def test_default_none_and_merge_none(self): + default = merge = None + result = self._callFUT(default, merge) + self.assertIsNone(result) + + def test_default_options_and_merge_none(self): + default = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ + ) + merge = None + result = self._callFUT(default, merge) + expected = default + self.assertEqual(result, expected) + + def test_default_none_and_merge_options(self): + default = None + merge = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE + ) + expected = merge + result = self._callFUT(default, merge) + self.assertEqual(result, expected) + + def test_default_and_merge_isolation_options(self): + default = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE, + read_write=TransactionOptions.ReadWrite(), + ) + merge = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ, + exclude_txn_from_change_streams=True, + ) + expected = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ, + read_write=TransactionOptions.ReadWrite(), + 
exclude_txn_from_change_streams=True, + ) + result = self._callFUT(default, merge) + self.assertEqual(result, expected) + + def test_default_isolation_and_merge_options(self): + default = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE + ) + merge = TransactionOptions( + read_write=TransactionOptions.ReadWrite(), + exclude_txn_from_change_streams=True, + ) + expected = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE, + read_write=TransactionOptions.ReadWrite(), + exclude_txn_from_change_streams=True, + ) + result = self._callFUT(default, merge) + self.assertEqual(result, expected) + + def test_default_isolation_and_merge_options_isolation_unspecified(self): + default = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE + ) + merge = TransactionOptions( + read_write=TransactionOptions.ReadWrite(), + exclude_txn_from_change_streams=True, + isolation_level=TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, + ) + expected = TransactionOptions( + isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE, + read_write=TransactionOptions.ReadWrite(), + exclude_txn_from_change_streams=True, + ) + result = self._callFUT(default, merge) + self.assertEqual(result, expected) diff --git a/tests/unit/test_batch.py b/tests/unit/test_batch.py index ff05bf6307..2cea740ab6 100644 --- a/tests/unit/test_batch.py +++ b/tests/unit/test_batch.py @@ -14,6 +14,7 @@ import unittest +from tests import _helpers as ot_helpers from unittest.mock import MagicMock from tests._helpers import ( OpenTelemetryBase, @@ -21,7 +22,21 @@ StatusCode, enrich_with_otel_scope, ) -from google.cloud.spanner_v1 import RequestOptions +from google.cloud.spanner_v1 import ( + RequestOptions, + CommitResponse, + TransactionOptions, + Mutation, + BatchWriteResponse, + DefaultTransactionOptions, +) +from google.cloud._helpers import UTC, _datetime_to_pb_timestamp +import datetime +from google.api_core.exceptions import Aborted, Unknown +from google.cloud.spanner_v1.batch import MutationGroups, _BatchBase, Batch +from google.cloud.spanner_v1.keyset import KeySet +from google.rpc.status_pb2 import Status + TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] @@ -57,8 +72,6 @@ def _make_one(self, *args, **kwargs): class Test_BatchBase(_BaseTest): def _getTargetClass(self): - from google.cloud.spanner_v1.batch import _BatchBase - return _BatchBase def _compare_values(self, result, source): @@ -83,8 +96,6 @@ def test__check_state_virtual(self): base._check_state() def test_insert(self): - from google.cloud.spanner_v1 import Mutation - session = _Session() base = self._make_one(session) @@ -100,8 +111,6 @@ def test_insert(self): self._compare_values(write.values, VALUES) def test_update(self): - from google.cloud.spanner_v1 import Mutation - session = _Session() base = self._make_one(session) @@ -117,8 +126,6 @@ def test_update(self): self._compare_values(write.values, VALUES) def test_insert_or_update(self): - from google.cloud.spanner_v1 import Mutation - session = _Session() base = self._make_one(session) @@ -134,8 +141,6 @@ def test_insert_or_update(self): self._compare_values(write.values, VALUES) def test_replace(self): - from google.cloud.spanner_v1 import Mutation - session = _Session() base = self._make_one(session) @@ -151,9 +156,6 @@ def test_replace(self): self._compare_values(write.values, VALUES) def test_delete(self): - from google.cloud.spanner_v1 import Mutation - from 
google.cloud.spanner_v1.keyset import KeySet - keys = [[0], [1], [2]] keyset = KeySet(keys=keys) session = _Session() @@ -176,8 +178,6 @@ def test_delete(self): class TestBatch(_BaseTest, OpenTelemetryBase): def _getTargetClass(self): - from google.cloud.spanner_v1.batch import Batch - return Batch def test_ctor(self): @@ -186,8 +186,6 @@ def test_ctor(self): self.assertIs(batch._session, session) def test_commit_already_committed(self): - from google.cloud.spanner_v1.keyset import KeySet - keys = [[0], [1], [2]] keyset = KeySet(keys=keys) database = _Database() @@ -202,9 +200,6 @@ def test_commit_already_committed(self): self.assertNoSpans() def test_commit_grpc_error(self): - from google.api_core.exceptions import Unknown - from google.cloud.spanner_v1.keyset import KeySet - keys = [[0], [1], [2]] keyset = KeySet(keys=keys) database = _Database() @@ -223,12 +218,6 @@ def test_commit_grpc_error(self): ) def test_commit_ok(self): - import datetime - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import TransactionOptions - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - now = datetime.datetime.utcnow().replace(tzinfo=UTC) now_pb = _datetime_to_pb_timestamp(now) response = CommitResponse(commit_timestamp=now_pb) @@ -273,7 +262,6 @@ def test_commit_ok(self): def test_aborted_exception_on_commit_with_retries(self): # Test case to verify that an Aborted exception is raised when # batch.commit() is called and the transaction is aborted internally. - from google.api_core.exceptions import Aborted database = _Database() # Setup the spanner API which throws Aborted exception when calling commit API. @@ -306,13 +294,8 @@ def _test_commit_with_options( request_options=None, max_commit_delay_in=None, exclude_txn_from_change_streams=False, + isolation_level=TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, ): - import datetime - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import TransactionOptions - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - now = datetime.datetime.utcnow().replace(tzinfo=UTC) now_pb = _datetime_to_pb_timestamp(now) response = CommitResponse(commit_timestamp=now_pb) @@ -326,6 +309,7 @@ def _test_commit_with_options( request_options=request_options, max_commit_delay=max_commit_delay_in, exclude_txn_from_change_streams=exclude_txn_from_change_streams, + isolation_level=isolation_level, ) self.assertEqual(committed, now) @@ -354,6 +338,10 @@ def _test_commit_with_options( single_use_txn.exclude_txn_from_change_streams, exclude_txn_from_change_streams, ) + self.assertEqual( + single_use_txn.isolation_level, + isolation_level, + ) self.assertEqual( metadata, [ @@ -399,8 +387,6 @@ def test_commit_w_incorrect_tag_dictionary_error(self): self._test_commit_with_options(request_options=request_options) def test_commit_w_max_commit_delay(self): - import datetime - request_options = RequestOptions( request_tag="tag-1", ) @@ -417,10 +403,16 @@ def test_commit_w_exclude_txn_from_change_streams(self): request_options=request_options, exclude_txn_from_change_streams=True ) - def test_context_mgr_already_committed(self): - import datetime - from google.cloud._helpers import UTC + def test_commit_w_isolation_level(self): + request_options = RequestOptions( + request_tag="tag-1", + ) + self._test_commit_with_options( + request_options=request_options, + 
isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ, + ) + def test_context_mgr_already_committed(self): now = datetime.datetime.utcnow().replace(tzinfo=UTC) database = _Database() api = database.spanner_api = _FauxSpannerAPI() @@ -435,12 +427,6 @@ def test_context_mgr_already_committed(self): self.assertEqual(api._committed, None) def test_context_mgr_success(self): - import datetime - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import TransactionOptions - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - now = datetime.datetime.utcnow().replace(tzinfo=UTC) now_pb = _datetime_to_pb_timestamp(now) response = CommitResponse(commit_timestamp=now_pb) @@ -481,11 +467,6 @@ def test_context_mgr_success(self): ) def test_context_mgr_failure(self): - import datetime - from google.cloud.spanner_v1 import CommitResponse - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - now = datetime.datetime.utcnow().replace(tzinfo=UTC) now_pb = _datetime_to_pb_timestamp(now) response = CommitResponse(commit_timestamp=now_pb) @@ -509,8 +490,6 @@ class _BailOut(Exception): class TestMutationGroups(_BaseTest, OpenTelemetryBase): def _getTargetClass(self): - from google.cloud.spanner_v1.batch import MutationGroups - return MutationGroups def test_ctor(self): @@ -519,8 +498,6 @@ def test_ctor(self): self.assertIs(groups._session, session) def test_batch_write_already_committed(self): - from google.cloud.spanner_v1.keyset import KeySet - keys = [[0], [1], [2]] keyset = KeySet(keys=keys) database = _Database() @@ -541,9 +518,6 @@ def test_batch_write_already_committed(self): groups.batch_write() def test_batch_write_grpc_error(self): - from google.api_core.exceptions import Unknown - from google.cloud.spanner_v1.keyset import KeySet - keys = [[0], [1], [2]] keyset = KeySet(keys=keys) database = _Database() @@ -563,21 +537,18 @@ def test_batch_write_grpc_error(self): ) def _test_batch_write_with_request_options( - self, request_options=None, exclude_txn_from_change_streams=False + self, + request_options=None, + exclude_txn_from_change_streams=False, + enable_end_to_end_tracing=False, ): - import datetime - from google.cloud.spanner_v1 import BatchWriteResponse - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.rpc.status_pb2 import Status - now = datetime.datetime.utcnow().replace(tzinfo=UTC) now_pb = _datetime_to_pb_timestamp(now) status_pb = Status(code=200) response = BatchWriteResponse( commit_timestamp=now_pb, indexes=[0], status=status_pb ) - database = _Database() + database = _Database(enable_end_to_end_tracing=enable_end_to_end_tracing) api = database.spanner_api = _FauxSpannerAPI(_batch_write_response=[response]) session = _Session(database) groups = self._make_one(session) @@ -600,13 +571,22 @@ def _test_batch_write_with_request_options( ) = api._batch_request self.assertEqual(session, self.SESSION_NAME) self.assertEqual(mutation_groups, groups._mutation_groups) - self.assertEqual( - metadata, - [ - ("google-cloud-resource-prefix", database.name), - ("x-goog-spanner-route-to-leader", "true"), - ], - ) + expected_metadata = [ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ] + + if enable_end_to_end_tracing and ot_helpers.HAS_OPENTELEMETRY_INSTALLED: + expected_metadata.append(("x-goog-spanner-end-to-end-tracing", "true")) + self.assertTrue( 
+ any(key == "traceparent" for key, _ in metadata), + "traceparent is missing in metadata", + ) + + # Remove traceparent from actual metadata for comparison + filtered_metadata = [item for item in metadata if item[0] != "traceparent"] + + self.assertEqual(filtered_metadata, expected_metadata) if request_options is None: expected_request_options = RequestOptions() elif type(request_options) is dict: @@ -627,6 +607,9 @@ def _test_batch_write_with_request_options( def test_batch_write_no_request_options(self): self._test_batch_write_with_request_options() + def test_batch_write_end_to_end_tracing_enabled(self): + self._test_batch_write_with_request_options(enable_end_to_end_tracing=True) + def test_batch_write_w_transaction_tag_success(self): self._test_batch_write_with_request_options( RequestOptions(transaction_tag="tag-1-1") @@ -656,8 +639,12 @@ def session_id(self): class _Database(object): - name = "testing" - _route_to_leader_enabled = True + def __init__(self, enable_end_to_end_tracing=False): + self.name = "testing" + self._route_to_leader_enabled = True + if enable_end_to_end_tracing: + self.observability_options = dict(enable_end_to_end_tracing=True) + self.default_transaction_options = DefaultTransactionOptions() class _FauxSpannerAPI: @@ -676,9 +663,6 @@ def commit( request=None, metadata=None, ): - from google.api_core.exceptions import Unknown - from google.api_core.exceptions import Aborted - max_commit_delay = None if type(request).pb(request).HasField("max_commit_delay"): max_commit_delay = request.max_commit_delay @@ -703,8 +687,6 @@ def batch_write( request=None, metadata=None, ): - from google.api_core.exceptions import Unknown - self._batch_request = ( request.session, request.mutation_groups, diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 174e5116c2..a464209874 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -14,8 +14,9 @@ import unittest +import os import mock -from google.cloud.spanner_v1 import DirectedReadOptions +from google.cloud.spanner_v1 import DirectedReadOptions, DefaultTransactionOptions def _make_credentials(): @@ -52,6 +53,9 @@ class TestClient(unittest.TestCase): "auto_failover_disabled": True, }, } + DEFAULT_TRANSACTION_OPTIONS = DefaultTransactionOptions( + isolation_level="SERIALIZABLE" + ) def _get_target_class(self): from google.cloud import spanner @@ -72,6 +76,7 @@ def _constructor_test_helper( expected_query_options=None, route_to_leader_enabled=True, directed_read_options=None, + default_transaction_options=None, ): import google.api_core.client_options from google.cloud.spanner_v1 import client as MUT @@ -98,6 +103,7 @@ def _constructor_test_helper( credentials=creds, query_options=query_options, directed_read_options=directed_read_options, + default_transaction_options=default_transaction_options, **kwargs ) @@ -128,6 +134,10 @@ def _constructor_test_helper( self.assertFalse(client.route_to_leader_enabled) if directed_read_options is not None: self.assertEqual(client.directed_read_options, directed_read_options) + if default_transaction_options is not None: + self.assertEqual( + client.default_transaction_options, default_transaction_options + ) @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") @mock.patch("warnings.warn") @@ -158,6 +168,8 @@ def test_constructor_custom_client_info(self): creds = _make_credentials() self._constructor_test_helper(expected_scopes, creds, client_info=client_info) + # Disable metrics to avoid google.auth.default calls from Metric Exporter + 
@mock.patch.dict(os.environ, {"SPANNER_ENABLE_BUILTIN_METRICS": ""}) def test_constructor_implicit_credentials(self): from google.cloud.spanner_v1 import client as MUT @@ -259,6 +271,17 @@ def test_constructor_route_to_leader_disbled(self): expected_scopes, creds, route_to_leader_enabled=False ) + def test_constructor_w_default_transaction_options(self): + from google.cloud.spanner_v1 import client as MUT + + expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) + creds = _make_credentials() + self._constructor_test_helper( + expected_scopes, + creds, + default_transaction_options=self.DEFAULT_TRANSACTION_OPTIONS, + ) + @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") def test_instance_admin_api(self, mock_em): from google.cloud.spanner_v1.client import SPANNER_ADMIN_SCOPE diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 13a37f66fe..1afda7f850 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -25,7 +25,11 @@ from google.api_core.retry import Retry from google.protobuf.field_mask_pb2 import FieldMask -from google.cloud.spanner_v1 import RequestOptions, DirectedReadOptions +from google.cloud.spanner_v1 import ( + RequestOptions, + DirectedReadOptions, + DefaultTransactionOptions, +) DML_WO_PARAM = """ DELETE FROM citizens @@ -3116,6 +3120,7 @@ def __init__( project=TestDatabase.PROJECT_ID, route_to_leader_enabled=True, directed_read_options=None, + default_transaction_options=DefaultTransactionOptions(), ): from google.cloud.spanner_v1 import ExecuteSqlRequest @@ -3129,6 +3134,7 @@ def __init__( self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") self.route_to_leader_enabled = route_to_leader_enabled self.directed_read_options = directed_read_options + self.default_transaction_options = default_transaction_options class _Instance(object): @@ -3156,6 +3162,7 @@ def __init__(self, name, instance=None): self.logger = mock.create_autospec(Logger, instance=True) self._directed_read_options = None + self.default_transaction_options = DefaultTransactionOptions() class _Pool(object): diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py index 1bfafb37fe..e7ad729438 100644 --- a/tests/unit/test_instance.py +++ b/tests/unit/test_instance.py @@ -14,6 +14,7 @@ import unittest import mock +from google.cloud.spanner_v1 import DefaultTransactionOptions class TestInstance(unittest.TestCase): @@ -1019,6 +1020,7 @@ def __init__(self, project, timeout_seconds=None): self.timeout_seconds = timeout_seconds self.route_to_leader_enabled = True self.directed_read_options = None + self.default_transaction_options = DefaultTransactionOptions() def copy(self): from copy import deepcopy diff --git a/tests/unit/test_metrics.py b/tests/unit/test_metrics.py new file mode 100644 index 0000000000..cd5ca2e6fc --- /dev/null +++ b/tests/unit/test_metrics.py @@ -0,0 +1,90 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
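For context on the new knob threaded through test_client.py, test_database.py, and test_instance.py above: default_transaction_options is supplied when the client is constructed. A minimal sketch, assuming a placeholder project id and anonymous credentials (both illustrative, not part of the patch):

from google.auth.credentials import AnonymousCredentials
from google.cloud.spanner_v1 import Client, DefaultTransactionOptions

# Transactions started through this client inherit SERIALIZABLE isolation
# unless an individual request specifies its own isolation level.
client = Client(
    project="placeholder-project",
    credentials=AnonymousCredentials(),
    default_transaction_options=DefaultTransactionOptions(
        isolation_level="SERIALIZABLE"
    ),
)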
+ +import pytest +from unittest.mock import MagicMock +from google.api_core.exceptions import ServiceUnavailable +from google.cloud.spanner_v1.client import Client +from unittest.mock import patch +from grpc._interceptor import _UnaryOutcome +from google.cloud.spanner_v1.metrics.spanner_metrics_tracer_factory import ( + SpannerMetricsTracerFactory, +) +from opentelemetry import metrics + +pytest.importorskip("opentelemetry") +# Skip if semconv attributes are not present, as tracing won't be enabled either +# pytest.importorskip("opentelemetry.semconv.attributes.otel_attributes") + + +@pytest.fixture(autouse=True) +def patched_client(monkeypatch): + monkeypatch.setenv("SPANNER_ENABLE_BUILTIN_METRICS", "true") + metrics.set_meter_provider(metrics.NoOpMeterProvider()) + + # Remove the tracer factory to avoid a previously disabled factory from other tests polluting this one + if SpannerMetricsTracerFactory._metrics_tracer_factory is not None: + SpannerMetricsTracerFactory._metrics_tracer_factory = None + + client = Client() + yield client + + # Reset global metrics state + metrics.set_meter_provider(metrics.NoOpMeterProvider()) + SpannerMetricsTracerFactory._metrics_tracer_factory = None + SpannerMetricsTracerFactory.current_metrics_tracer = None + + +def test_metrics_emission_with_failure_attempt(patched_client): + instance = patched_client.instance("test-instance") + database = instance.database("example-db") + factory = SpannerMetricsTracerFactory() + + assert factory.enabled + + transport = database.spanner_api._transport + metrics_interceptor = transport._metrics_interceptor + original_intercept = metrics_interceptor.intercept + first_attempt = True + + def mocked_raise(*args, **kwargs): + raise ServiceUnavailable("Service Unavailable") + + def mocked_call(*args, **kwargs): + return _UnaryOutcome(MagicMock(), MagicMock()) + + def intercept_wrapper(invoked_method, request_or_iterator, call_details): + nonlocal original_intercept + nonlocal first_attempt + invoked_method = mocked_call + if first_attempt: + first_attempt = False + invoked_method = mocked_raise + response = original_intercept( + invoked_method=invoked_method, + request_or_iterator=request_or_iterator, + call_details=call_details, + ) + return response + + metrics_interceptor.intercept = intercept_wrapper + patch_path = "google.cloud.spanner_v1.metrics.metrics_exporter.CloudMonitoringMetricsExporter.export" + with patch(patch_path): + with database.snapshot(): + pass + + # Verify that the attempt count increased from the failed initial attempt + assert ( + SpannerMetricsTracerFactory.current_metrics_tracer.current_op.attempt_count + ) == 2 diff --git a/tests/unit/test_metrics_capture.py b/tests/unit/test_metrics_capture.py new file mode 100644 index 0000000000..107e9daeb4 --- /dev/null +++ b/tests/unit/test_metrics_capture.py @@ -0,0 +1,50 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
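The test_metrics_capture.py cases that follow pin down the MetricsCapture contract: entering the context records an operation start, and leaving it records completion. A minimal usage sketch (do_rpc is a hypothetical stand-in for any instrumented call):

from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture

with MetricsCapture():
    # record_operation_start() has fired by this point;
    # record_operation_completion() fires when the block exits.
    do_rpc()  # hypothetical instrumented call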
+ +import pytest +from unittest import mock +from google.cloud.spanner_v1.metrics.metrics_capture import MetricsCapture +from google.cloud.spanner_v1.metrics.metrics_tracer_factory import MetricsTracerFactory +from google.cloud.spanner_v1.metrics.spanner_metrics_tracer_factory import ( + SpannerMetricsTracerFactory, +) + + +@pytest.fixture +def mock_tracer_factory(): + SpannerMetricsTracerFactory(enabled=True) + with mock.patch.object( + MetricsTracerFactory, "create_metrics_tracer" + ) as mock_create: + yield mock_create + + +def test_metrics_capture_enter(mock_tracer_factory): + mock_tracer = mock.Mock() + mock_tracer_factory.return_value = mock_tracer + + with MetricsCapture() as capture: + assert capture is not None + mock_tracer_factory.assert_called_once() + mock_tracer.record_operation_start.assert_called_once() + + +def test_metrics_capture_exit(mock_tracer_factory): + mock_tracer = mock.Mock() + mock_tracer_factory.return_value = mock_tracer + + with MetricsCapture(): + pass + + mock_tracer.record_operation_completion.assert_called_once() diff --git a/tests/unit/test_metric_exporter.py b/tests/unit/test_metrics_exporter.py similarity index 99% rename from tests/unit/test_metric_exporter.py rename to tests/unit/test_metrics_exporter.py index 08ae9ecf21..62fb531345 100644 --- a/tests/unit/test_metric_exporter.py +++ b/tests/unit/test_metrics_exporter.py @@ -1,4 +1,4 @@ -# Copyright 2016 Google LLC All rights reserved. +# Copyright 2025 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -333,7 +333,7 @@ def create_tsr_side_effect(name, time_series): self.assertEqual(len(mockClient.create_service_time_series.mock_calls), 2) @patch( - "google.cloud.spanner_v1.metrics.metrics_exporter.HAS_DEPENDENCIES_INSTALLED", + "google.cloud.spanner_v1.metrics.metrics_exporter.HAS_OPENTELEMETRY_INSTALLED", False, ) def test_export_early_exit_if_extras_not_installed(self): diff --git a/tests/unit/test_metrics_interceptor.py b/tests/unit/test_metrics_interceptor.py new file mode 100644 index 0000000000..e32003537f --- /dev/null +++ b/tests/unit/test_metrics_interceptor.py @@ -0,0 +1,128 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
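The test_metrics_interceptor.py cases that follow fix the mapping from the google-cloud-resource-prefix metadata value to project/instance/database labels. A regex sketch consistent with those expectations (an illustration, not the library's actual implementation):

import re

_RESOURCE_RE = re.compile(
    r"projects/(?P<project>[^/]+)"
    r"/instances/(?P<instance>[^/]+)"
    r"/databases/(?P<database>[^/]+)"
)

def parse_resource_path(path):
    # Well-formed paths yield all three labels; anything else yields {}.
    match = _RESOURCE_RE.match(path)
    return match.groupdict() if match else {}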
+ +import pytest +from google.cloud.spanner_v1.metrics.metrics_interceptor import MetricsInterceptor +from google.cloud.spanner_v1.metrics.spanner_metrics_tracer_factory import ( + SpannerMetricsTracerFactory, +) +from unittest.mock import MagicMock + + +@pytest.fixture +def interceptor(): + SpannerMetricsTracerFactory(enabled=True) + return MetricsInterceptor() + + +def test_parse_resource_path_valid(interceptor): + path = "projects/my_project/instances/my_instance/databases/my_database" + expected = { + "project": "my_project", + "instance": "my_instance", + "database": "my_database", + } + assert interceptor._parse_resource_path(path) == expected + + +def test_parse_resource_path_invalid(interceptor): + path = "invalid/path" + expected = {} + assert interceptor._parse_resource_path(path) == expected + + +def test_extract_resource_from_path(interceptor): + metadata = [ + ( + "google-cloud-resource-prefix", + "projects/my_project/instances/my_instance/databases/my_database", + ) + ] + expected = { + "project": "my_project", + "instance": "my_instance", + "database": "my_database", + } + assert interceptor._extract_resource_from_path(metadata) == expected + + +def test_set_metrics_tracer_attributes(interceptor): + SpannerMetricsTracerFactory.current_metrics_tracer = MockMetricTracer() + resources = { + "project": "my_project", + "instance": "my_instance", + "database": "my_database", + } + + interceptor._set_metrics_tracer_attributes(resources) + assert SpannerMetricsTracerFactory.current_metrics_tracer.project == "my_project" + assert SpannerMetricsTracerFactory.current_metrics_tracer.instance == "my_instance" + assert SpannerMetricsTracerFactory.current_metrics_tracer.database == "my_database" + + +def test_intercept_with_tracer(interceptor): + SpannerMetricsTracerFactory.current_metrics_tracer = MockMetricTracer() + SpannerMetricsTracerFactory.current_metrics_tracer.record_attempt_start = ( + MagicMock() + ) + SpannerMetricsTracerFactory.current_metrics_tracer.record_attempt_completion = ( + MagicMock() + ) + SpannerMetricsTracerFactory.current_metrics_tracer.gfe_enabled = False + + invoked_response = MagicMock() + invoked_response.initial_metadata.return_value = {} + + mock_invoked_method = MagicMock(return_value=invoked_response) + call_details = MagicMock( + method="spanner.someMethod", + metadata=[ + ( + "google-cloud-resource-prefix", + "projects/my_project/instances/my_instance/databases/my_database", + ) + ], + ) + + response = interceptor.intercept(mock_invoked_method, "request", call_details) + assert response == invoked_response + SpannerMetricsTracerFactory.current_metrics_tracer.record_attempt_start.assert_called_once() + SpannerMetricsTracerFactory.current_metrics_tracer.record_attempt_completion.assert_called_once() + mock_invoked_method.assert_called_once_with("request", call_details) + + +class MockMetricTracer: + def __init__(self): + self.project = None + self.instance = None + self.database = None + self.method = None + + def set_project(self, project): + self.project = project + + def set_instance(self, instance): + self.instance = instance + + def set_database(self, database): + self.database = database + + def set_method(self, method): + self.method = method + + def record_attempt_start(self): + pass + + def record_attempt_completion(self): + pass diff --git a/tests/unit/test_metrics_tracer.py b/tests/unit/test_metrics_tracer.py index 9b59c59a7c..70491ef5b2 100644 --- a/tests/unit/test_metrics_tracer.py +++ b/tests/unit/test_metrics_tracer.py @@ -1,4 +1,3 @@ -# -*- 
coding: utf-8 -*- # Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -222,3 +221,45 @@ def test_set_method(metrics_tracer): # Ensure it does not overwrite metrics_tracer.set_method("new_method") assert metrics_tracer.client_attributes["method"] == "test_method" + + +def test_record_gfe_latency(metrics_tracer): + mock_gfe_latency = mock.create_autospec(Histogram, instance=True) + metrics_tracer._instrument_gfe_latency = mock_gfe_latency + metrics_tracer.gfe_enabled = True # Ensure GFE is enabled + + # Test when tracing is enabled + metrics_tracer.record_gfe_latency(100) + assert mock_gfe_latency.record.call_count == 1 + assert mock_gfe_latency.record.call_args[1]["amount"] == 100 + assert ( + mock_gfe_latency.record.call_args[1]["attributes"] + == metrics_tracer.client_attributes + ) + + # Test when tracing is disabled + metrics_tracer.enabled = False + metrics_tracer.record_gfe_latency(200) + assert mock_gfe_latency.record.call_count == 1 # Should not increment + metrics_tracer.enabled = True # Reset for next test + + +def test_record_gfe_missing_header_count(metrics_tracer): + mock_gfe_missing_header_count = mock.create_autospec(Counter, instance=True) + metrics_tracer._instrument_gfe_missing_header_count = mock_gfe_missing_header_count + metrics_tracer.gfe_enabled = True # Ensure GFE is enabled + + # Test when tracing is enabled + metrics_tracer.record_gfe_missing_header_count() + assert mock_gfe_missing_header_count.add.call_count == 1 + assert mock_gfe_missing_header_count.add.call_args[1]["amount"] == 1 + assert ( + mock_gfe_missing_header_count.add.call_args[1]["attributes"] + == metrics_tracer.client_attributes + ) + + # Test when tracing is disabled + metrics_tracer.enabled = False + metrics_tracer.record_gfe_missing_header_count() + assert mock_gfe_missing_header_count.add.call_count == 1 # Should not increment + metrics_tracer.enabled = True # Reset for next test diff --git a/tests/unit/test_metrics_tracer_factory.py b/tests/unit/test_metrics_tracer_factory.py index 637bc4c06a..64fb4d83d1 100644 --- a/tests/unit/test_metrics_tracer_factory.py +++ b/tests/unit/test_metrics_tracer_factory.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Copyright 2025 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index ff8e9dad12..8f5f7039b9 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -14,20 +14,44 @@ import google.api_core.gapic_v1.method -from google.cloud.spanner_v1 import RequestOptions from google.cloud.spanner_v1._opentelemetry_tracing import trace_call import mock +import datetime +from google.cloud.spanner_v1 import ( + Transaction as TransactionPB, + TransactionOptions, + CommitResponse, + CommitRequest, + RequestOptions, + SpannerClient, + CreateSessionRequest, + Session as SessionRequestProto, + ExecuteSqlRequest, + TypeCode, +) +from google.cloud._helpers import UTC, _datetime_to_pb_timestamp +from google.cloud.spanner_v1._helpers import _delay_until_retry +from google.cloud.spanner_v1.transaction import Transaction from tests._helpers import ( OpenTelemetryBase, LIB_VERSION, StatusCode, enrich_with_otel_scope, ) +import grpc +from google.cloud.spanner_v1.session import Session +from google.cloud.spanner_v1.snapshot import Snapshot +from google.cloud.spanner_v1.database import Database +from google.cloud.spanner_v1.keyset import KeySet +from google.protobuf.duration_pb2 import Duration +from 
google.rpc.error_details_pb2 import RetryInfo +from google.api_core.exceptions import Unknown, Aborted, NotFound, Cancelled +from google.protobuf.struct_pb2 import Struct, Value +from google.cloud.spanner_v1.batch import Batch +from google.cloud.spanner_v1 import DefaultTransactionOptions def _make_rpc_error(error_cls, trailing_metadata=None): - import grpc - grpc_error = mock.create_autospec(grpc.Call, instance=True) grpc_error.trailing_metadata.return_value = trailing_metadata return error_cls("error", errors=(grpc_error,)) @@ -54,33 +78,31 @@ class TestSession(OpenTelemetryBase): enrich_with_otel_scope(BASE_ATTRIBUTES) def _getTargetClass(self): - from google.cloud.spanner_v1.session import Session - return Session def _make_one(self, *args, **kwargs): return self._getTargetClass()(*args, **kwargs) @staticmethod - def _make_database(name=DATABASE_NAME, database_role=None): - from google.cloud.spanner_v1.database import Database - + def _make_database( + name=DATABASE_NAME, + database_role=None, + default_transaction_options=DefaultTransactionOptions(), + ): database = mock.create_autospec(Database, instance=True) database.name = name database.log_commit_stats = False database.database_role = database_role database._route_to_leader_enabled = True + database.default_transaction_options = default_transaction_options + return database @staticmethod def _make_session_pb(name, labels=None, database_role=None): - from google.cloud.spanner_v1 import Session - - return Session(name=name, labels=labels, creator_role=database_role) + return SessionRequestProto(name=name, labels=labels, creator_role=database_role) def _make_spanner_api(self): - from google.cloud.spanner_v1 import SpannerClient - return mock.Mock(autospec=SpannerClient, instance=True) def test_constructor_wo_labels(self): @@ -144,9 +166,6 @@ def test_create_w_session_id(self): self.assertNoSpans() def test_create_w_database_role(self): - from google.cloud.spanner_v1 import CreateSessionRequest - from google.cloud.spanner_v1 import Session as SessionRequestProto - session_pb = self._make_session_pb( self.SESSION_NAME, database_role=self.DATABASE_ROLE ) @@ -180,9 +199,6 @@ def test_create_w_database_role(self): ) def test_create_session_span_annotations(self): - from google.cloud.spanner_v1 import CreateSessionRequest - from google.cloud.spanner_v1 import Session as SessionRequestProto - session_pb = self._make_session_pb( self.SESSION_NAME, database_role=self.DATABASE_ROLE ) @@ -217,8 +233,6 @@ def test_create_session_span_annotations(self): self.assertSpanEvents("TestSessionSpan", wantEventNames, span) def test_create_wo_database_role(self): - from google.cloud.spanner_v1 import CreateSessionRequest - session_pb = self._make_session_pb(self.SESSION_NAME) gax_api = self._make_spanner_api() gax_api.create_session.return_value = session_pb @@ -247,8 +261,6 @@ def test_create_wo_database_role(self): ) def test_create_ok(self): - from google.cloud.spanner_v1 import CreateSessionRequest - session_pb = self._make_session_pb(self.SESSION_NAME) gax_api = self._make_spanner_api() gax_api.create_session.return_value = session_pb @@ -277,9 +289,6 @@ def test_create_ok(self): ) def test_create_w_labels(self): - from google.cloud.spanner_v1 import CreateSessionRequest - from google.cloud.spanner_v1 import Session as SessionPB - labels = {"foo": "bar"} session_pb = self._make_session_pb(self.SESSION_NAME, labels=labels) gax_api = self._make_spanner_api() @@ -294,7 +303,7 @@ def test_create_w_labels(self): request = CreateSessionRequest( 
database=database.name, - session=SessionPB(labels=labels), + session=SessionRequestProto(labels=labels), ) gax_api.create_session.assert_called_once_with( @@ -311,8 +320,6 @@ def test_create_w_labels(self): ) def test_create_error(self): - from google.api_core.exceptions import Unknown - gax_api = self._make_spanner_api() gax_api.create_session.side_effect = Unknown("error") database = self._make_database() @@ -385,8 +392,6 @@ def test_exists_hit_wo_span(self): self.assertNoSpans() def test_exists_miss(self): - from google.api_core.exceptions import NotFound - gax_api = self._make_spanner_api() gax_api.get_session.side_effect = NotFound("testing") database = self._make_database() @@ -414,8 +419,6 @@ def test_exists_miss(self): False, ) def test_exists_miss_wo_span(self): - from google.api_core.exceptions import NotFound - gax_api = self._make_spanner_api() gax_api.get_session.side_effect = NotFound("testing") database = self._make_database() @@ -436,8 +439,6 @@ def test_exists_miss_wo_span(self): self.assertNoSpans() def test_exists_error(self): - from google.api_core.exceptions import Unknown - gax_api = self._make_spanner_api() gax_api.get_session.side_effect = Unknown("testing") database = self._make_database() @@ -469,8 +470,6 @@ def test_ping_wo_session_id(self): session.ping() def test_ping_hit(self): - from google.cloud.spanner_v1 import ExecuteSqlRequest - gax_api = self._make_spanner_api() gax_api.execute_sql.return_value = "1" database = self._make_database() @@ -491,9 +490,6 @@ def test_ping_hit(self): ) def test_ping_miss(self): - from google.api_core.exceptions import NotFound - from google.cloud.spanner_v1 import ExecuteSqlRequest - gax_api = self._make_spanner_api() gax_api.execute_sql.side_effect = NotFound("testing") database = self._make_database() @@ -515,9 +511,6 @@ def test_ping_miss(self): ) def test_ping_error(self): - from google.api_core.exceptions import Unknown - from google.cloud.spanner_v1 import ExecuteSqlRequest - gax_api = self._make_spanner_api() gax_api.execute_sql.side_effect = Unknown("testing") database = self._make_database() @@ -570,8 +563,6 @@ def test_delete_hit(self): ) def test_delete_miss(self): - from google.cloud.exceptions import NotFound - gax_api = self._make_spanner_api() gax_api.delete_session.side_effect = NotFound("testing") database = self._make_database() @@ -597,8 +588,6 @@ def test_delete_miss(self): ) def test_delete_error(self): - from google.api_core.exceptions import Unknown - gax_api = self._make_spanner_api() gax_api.delete_session.side_effect = Unknown("testing") database = self._make_database() @@ -631,8 +620,6 @@ def test_snapshot_not_created(self): session.snapshot() def test_snapshot_created(self): - from google.cloud.spanner_v1.snapshot import Snapshot - database = self._make_database() session = self._make_one(database) session._session_id = "DEADBEEF" # emulate 'session.create()' @@ -645,8 +632,6 @@ def test_snapshot_created(self): self.assertFalse(snapshot._multi_use) def test_snapshot_created_w_multi_use(self): - from google.cloud.spanner_v1.snapshot import Snapshot - database = self._make_database() session = self._make_one(database) session._session_id = "DEADBEEF" # emulate 'session.create()' @@ -659,8 +644,6 @@ def test_snapshot_created_w_multi_use(self): self.assertTrue(snapshot._multi_use) def test_read_not_created(self): - from google.cloud.spanner_v1.keyset import KeySet - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] KEYS = ["bharney@example.com", "phred@example.com"] @@ 
-672,8 +655,6 @@ def test_read_not_created(self): session.read(TABLE_NAME, COLUMNS, KEYSET) def test_read(self): - from google.cloud.spanner_v1.keyset import KeySet - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] KEYS = ["bharney@example.com", "phred@example.com"] @@ -730,9 +711,6 @@ def test_execute_sql_defaults(self): ) def test_execute_sql_non_default_retry(self): - from google.protobuf.struct_pb2 import Struct, Value - from google.cloud.spanner_v1 import TypeCode - SQL = "SELECT first_name, age FROM citizens" database = self._make_database() session = self._make_one(database) @@ -761,9 +739,6 @@ def test_execute_sql_non_default_retry(self): ) def test_execute_sql_explicit(self): - from google.protobuf.struct_pb2 import Struct, Value - from google.cloud.spanner_v1 import TypeCode - SQL = "SELECT first_name, age FROM citizens" database = self._make_database() session = self._make_one(database) @@ -797,8 +772,6 @@ def test_batch_not_created(self): session.batch() def test_batch_created(self): - from google.cloud.spanner_v1.batch import Batch - database = self._make_database() session = self._make_one(database) session._session_id = "DEADBEEF" @@ -816,8 +789,6 @@ def test_transaction_not_created(self): session.transaction() def test_transaction_created(self): - from google.cloud.spanner_v1.transaction import Transaction - database = self._make_database() session = self._make_one(database) session._session_id = "DEADBEEF" @@ -840,11 +811,6 @@ def test_transaction_w_existing_txn(self): self.assertTrue(existing.rolled_back) def test_run_in_transaction_callback_raises_non_gax_error(self): - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - ) - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -889,12 +855,6 @@ def unit_of_work(txn, *args, **kw): gax_api.begin_transaction.assert_not_called() def test_run_in_transaction_callback_raises_non_abort_rpc_error(self): - from google.api_core.exceptions import Cancelled - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - ) - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -933,17 +893,6 @@ def unit_of_work(txn, *args, **kw): gax_api.rollback.assert_not_called() def test_run_in_transaction_w_args_w_kwargs_wo_abort(self): - import datetime - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1004,10 +953,6 @@ def unit_of_work(txn, *args, **kw): ) def test_run_in_transaction_w_commit_error(self): - from google.api_core.exceptions import Unknown - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1059,18 +1004,6 @@ def unit_of_work(txn, *args, **kw): ) def test_run_in_transaction_w_abort_no_retry_metadata(self): - import datetime - from google.api_core.exceptions import Aborted - from google.cloud.spanner_v1 import 
CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1143,20 +1076,6 @@ def unit_of_work(txn, *args, **kw): ) def test_run_in_transaction_w_abort_w_retry_metadata(self): - import datetime - from google.api_core.exceptions import Aborted - from google.protobuf.duration_pb2 import Duration - from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1242,20 +1161,6 @@ def unit_of_work(txn, *args, **kw): ) def test_run_in_transaction_w_callback_raises_abort_wo_metadata(self): - import datetime - from google.api_core.exceptions import Aborted - from google.protobuf.duration_pb2 import Duration - from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1331,20 +1236,6 @@ def unit_of_work(txn, *args, **kw): ) def test_run_in_transaction_w_abort_w_retry_metadata_deadline(self): - import datetime - from google.api_core.exceptions import Aborted - from google.protobuf.duration_pb2 import Duration - from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud.spanner_v1.transaction import Transaction - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1421,14 +1312,6 @@ def _time(_results=[1, 1.5]): ) def test_run_in_transaction_w_timeout(self): - from google.api_core.exceptions import Aborted - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1510,17 +1393,6 @@ def _time(_results=[1, 2, 4, 8]): ) def test_run_in_transaction_w_commit_stats_success(self): - import datetime - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers 
import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1587,14 +1459,6 @@ def unit_of_work(txn, *args, **kw): ) def test_run_in_transaction_w_commit_stats_error(self): - from google.api_core.exceptions import Unknown - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1655,17 +1519,6 @@ def unit_of_work(txn, *args, **kw): database.logger.info.assert_not_called() def test_run_in_transaction_w_transaction_tag(self): - import datetime - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1730,17 +1583,6 @@ def unit_of_work(txn, *args, **kw): ) def test_run_in_transaction_w_exclude_txn_from_change_streams(self): - import datetime - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1808,20 +1650,6 @@ def unit_of_work(txn, *args, **kw): def test_run_in_transaction_w_abort_w_retry_metadata_w_exclude_txn_from_change_streams( self, ): - import datetime - from google.api_core.exceptions import Aborted - from google.protobuf.duration_pb2 import Duration - from google.rpc.error_details_pb2 import RetryInfo - from google.cloud.spanner_v1 import CommitRequest - from google.cloud.spanner_v1 import CommitResponse - from google.cloud.spanner_v1 import ( - Transaction as TransactionPB, - TransactionOptions, - ) - from google.cloud._helpers import UTC - from google.cloud._helpers import _datetime_to_pb_timestamp - from google.cloud.spanner_v1.transaction import Transaction - TABLE_NAME = "citizens" COLUMNS = ["email", "first_name", "last_name", "age"] VALUES = [ @@ -1914,9 +1742,111 @@ def unit_of_work(txn, *args, **kw): * 2, ) - def test_delay_helper_w_no_delay(self): - from google.cloud.spanner_v1._helpers import _delay_until_retry + def test_run_in_transaction_w_isolation_level_at_request(self): + gax_api = self._make_spanner_api() + gax_api.begin_transaction.return_value = TransactionPB(id=b"FACEDACE") + database = self._make_database() + database.spanner_api = gax_api + session = self._make_one(database) + session._session_id = self.SESSION_ID + + def unit_of_work(txn, *args, **kw): + txn.insert("test", [], []) + return 42 + + return_value = session.run_in_transaction( + unit_of_work, "abc", isolation_level="SERIALIZABLE" + ) + + self.assertIsNone(session._transaction) + self.assertEqual(return_value, 42) + expected_options = TransactionOptions( + read_write=TransactionOptions.ReadWrite(), + 
isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE, + ) + gax_api.begin_transaction.assert_called_once_with( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ], + ) + + def test_run_in_transaction_w_isolation_level_at_client(self): + gax_api = self._make_spanner_api() + gax_api.begin_transaction.return_value = TransactionPB(id=b"FACEDACE") + database = self._make_database( + default_transaction_options=DefaultTransactionOptions( + isolation_level="SERIALIZABLE" + ) + ) + database.spanner_api = gax_api + session = self._make_one(database) + session._session_id = self.SESSION_ID + + def unit_of_work(txn, *args, **kw): + txn.insert("test", [], []) + return 42 + + return_value = session.run_in_transaction(unit_of_work, "abc") + + self.assertIsNone(session._transaction) + self.assertEqual(return_value, 42) + + expected_options = TransactionOptions( + read_write=TransactionOptions.ReadWrite(), + isolation_level=TransactionOptions.IsolationLevel.SERIALIZABLE, + ) + gax_api.begin_transaction.assert_called_once_with( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ], + ) + + def test_run_in_transaction_w_isolation_level_at_request_overrides_client(self): + gax_api = self._make_spanner_api() + gax_api.begin_transaction.return_value = TransactionPB(id=b"FACEDACE") + database = self._make_database( + default_transaction_options=DefaultTransactionOptions( + isolation_level="SERIALIZABLE" + ) + ) + database.spanner_api = gax_api + session = self._make_one(database) + session._session_id = self.SESSION_ID + + def unit_of_work(txn, *args, **kw): + txn.insert("test", [], []) + return 42 + + return_value = session.run_in_transaction( + unit_of_work, + "abc", + isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ, + ) + + self.assertIsNone(session._transaction) + self.assertEqual(return_value, 42) + + expected_options = TransactionOptions( + read_write=TransactionOptions.ReadWrite(), + isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ, + ) + gax_api.begin_transaction.assert_called_once_with( + session=self.SESSION_NAME, + options=expected_options, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ], + ) + + def test_delay_helper_w_no_delay(self): metadata_mock = mock.Mock() metadata_mock.trailing_metadata.return_value = {} diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py index 6dc14fb7cd..11fc0135d1 100644 --- a/tests/unit/test_snapshot.py +++ b/tests/unit/test_snapshot.py @@ -116,12 +116,25 @@ def _make_spanner_api(self): return mock.create_autospec(SpannerClient, instance=True) def _call_fut( - self, derived, restart, request, span_name=None, session=None, attributes=None + self, + derived, + restart, + request, + span_name=None, + session=None, + attributes=None, + metadata=None, ): from google.cloud.spanner_v1.snapshot import _restart_on_unavailable return _restart_on_unavailable( - restart, request, span_name, session, attributes, transaction=derived + restart, + request, + metadata, + span_name, + session, + attributes, + transaction=derived, ) def _make_item(self, value, resume_token=b"", metadata=None): @@ -142,7 +155,7 @@ def test_iteration_w_empty_raw(self): derived = self._makeDerived(session) resumable = self._call_fut(derived, restart, 
request) self.assertEqual(list(resumable), []) - restart.assert_called_once_with(request=request) + restart.assert_called_once_with(request=request, metadata=None) self.assertNoSpans() def test_iteration_w_non_empty_raw(self): @@ -156,7 +169,7 @@ def test_iteration_w_non_empty_raw(self): derived = self._makeDerived(session) resumable = self._call_fut(derived, restart, request) self.assertEqual(list(resumable), list(ITEMS)) - restart.assert_called_once_with(request=request) + restart.assert_called_once_with(request=request, metadata=None) self.assertNoSpans() def test_iteration_w_raw_w_resume_tken(self): @@ -175,7 +188,7 @@ def test_iteration_w_raw_w_resume_tken(self): derived = self._makeDerived(session) resumable = self._call_fut(derived, restart, request) self.assertEqual(list(resumable), list(ITEMS)) - restart.assert_called_once_with(request=request) + restart.assert_called_once_with(request=request, metadata=None) self.assertNoSpans() def test_iteration_w_raw_raising_unavailable_no_token(self): @@ -246,7 +259,7 @@ def test_iteration_w_raw_raising_non_retryable_internal_error_no_token(self): resumable = self._call_fut(derived, restart, request) with self.assertRaises(InternalServerError): list(resumable) - restart.assert_called_once_with(request=request) + restart.assert_called_once_with(request=request, metadata=None) self.assertNoSpans() def test_iteration_w_raw_raising_unavailable(self): @@ -316,7 +329,7 @@ def test_iteration_w_raw_raising_non_retryable_internal_error(self): resumable = self._call_fut(derived, restart, request) with self.assertRaises(InternalServerError): list(resumable) - restart.assert_called_once_with(request=request) + restart.assert_called_once_with(request=request, metadata=None) self.assertNoSpans() def test_iteration_w_raw_raising_unavailable_after_token(self): @@ -487,7 +500,7 @@ def test_iteration_w_raw_raising_non_retryable_internal_error_after_token(self): resumable = self._call_fut(derived, restart, request) with self.assertRaises(InternalServerError): list(resumable) - restart.assert_called_once_with(request=request) + restart.assert_called_once_with(request=request, metadata=None) self.assertNoSpans() def test_iteration_w_span_creation(self): diff --git a/tests/unit/test_spanner.py b/tests/unit/test_spanner.py index ff34a109af..8bd95c7228 100644 --- a/tests/unit/test_spanner.py +++ b/tests/unit/test_spanner.py @@ -32,6 +32,7 @@ ExecuteBatchDmlRequest, ExecuteBatchDmlResponse, param_types, + DefaultTransactionOptions, ) from google.cloud.spanner_v1.types import transaction as transaction_type from google.cloud.spanner_v1.keyset import KeySet @@ -138,6 +139,7 @@ def _execute_update_helper( count=0, query_options=None, exclude_txn_from_change_streams=False, + isolation_level=TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, ): stats_pb = ResultSetStats(row_count_exact=1) @@ -147,6 +149,7 @@ def _execute_update_helper( transaction.transaction_tag = self.TRANSACTION_TAG transaction.exclude_txn_from_change_streams = exclude_txn_from_change_streams + transaction.isolation_level = isolation_level transaction._execute_sql_count = count row_count = transaction.execute_update( @@ -168,12 +171,14 @@ def _execute_update_expected_request( begin=True, count=0, exclude_txn_from_change_streams=False, + isolation_level=TransactionOptions.IsolationLevel.ISOLATION_LEVEL_UNSPECIFIED, ): if begin is True: expected_transaction = TransactionSelector( begin=TransactionOptions( read_write=TransactionOptions.ReadWrite(), 
exclude_txn_from_change_streams=exclude_txn_from_change_streams, + isolation_level=isolation_level, ) ) else: @@ -593,6 +598,32 @@ def test_transaction_should_include_begin_w_exclude_txn_from_change_streams_with ], ) + def test_transaction_should_include_begin_w_isolation_level_with_first_update( + self, + ): + database = _Database() + session = _Session(database) + api = database.spanner_api = self._make_spanner_api() + transaction = self._make_one(session) + self._execute_update_helper( + transaction=transaction, + api=api, + isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ, + ) + + api.execute_sql.assert_called_once_with( + request=self._execute_update_expected_request( + database=database, + isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ, + ), + retry=RETRY, + timeout=TIMEOUT, + metadata=[ + ("google-cloud-resource-prefix", database.name), + ("x-goog-spanner-route-to-leader", "true"), + ], + ) + def test_transaction_should_use_transaction_id_if_error_with_first_batch_update( self, ): @@ -1060,6 +1091,7 @@ def __init__(self): self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") self.directed_read_options = None + self.default_transaction_options = DefaultTransactionOptions() class _Instance(object): @@ -1073,6 +1105,7 @@ def __init__(self): self._instance = _Instance() self._route_to_leader_enabled = True self._directed_read_options = None + self.default_transaction_options = DefaultTransactionOptions() class _Session(object): diff --git a/tests/unit/test_spanner_metrics_tracer_factory.py b/tests/unit/test_spanner_metrics_tracer_factory.py new file mode 100644 index 0000000000..8ee4d53d3d --- /dev/null +++ b/tests/unit/test_spanner_metrics_tracer_factory.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
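The test_spanner.py expectations above spell out how a requested isolation level reaches the service: it rides in the TransactionOptions of the inline-begin selector attached to the first statement. The expected proto, written out explicitly (mirroring _execute_update_expected_request with begin=True):

from google.cloud.spanner_v1 import TransactionOptions, TransactionSelector

expected_transaction = TransactionSelector(
    begin=TransactionOptions(
        read_write=TransactionOptions.ReadWrite(),
        exclude_txn_from_change_streams=False,
        isolation_level=TransactionOptions.IsolationLevel.REPEATABLE_READ,
    )
)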
+ +from google.cloud.spanner_v1.metrics.spanner_metrics_tracer_factory import ( + SpannerMetricsTracerFactory, +) + + +class TestSpannerMetricsTracerFactory: + def test_new_instance_creation(self): + factory1 = SpannerMetricsTracerFactory(enabled=True) + factory2 = SpannerMetricsTracerFactory(enabled=True) + assert factory1 is factory2 # Should return the same instance + + def test_generate_client_uid_format(self): + client_uid = SpannerMetricsTracerFactory._generate_client_uid() + assert isinstance(client_uid, str) + assert len(client_uid.split("@")) == 3 # Should contain uuid, pid, and hostname + + def test_generate_client_hash(self): + client_uid = "123e4567-e89b-12d3-a456-426614174000@1234@hostname" + client_hash = SpannerMetricsTracerFactory._generate_client_hash(client_uid) + assert isinstance(client_hash, str) + assert len(client_hash) == 6 # Should be a 6-digit hex string + + def test_get_instance_config(self): + instance_config = SpannerMetricsTracerFactory._get_instance_config() + assert instance_config == "unknown" # As per the current implementation + + def test_get_client_name(self): + client_name = SpannerMetricsTracerFactory._get_client_name() + assert isinstance(client_name, str) + assert "spanner-python" in client_name + + def test_get_location(self): + location = SpannerMetricsTracerFactory._get_location() + assert isinstance(location, str) + assert location # Assert non-empty only, since the value can change depending on where this test runs. diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index d355d283fe..ddc91ea522 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -16,6 +16,7 @@ import mock from google.cloud.spanner_v1 import RequestOptions +from google.cloud.spanner_v1 import DefaultTransactionOptions from google.cloud.spanner_v1 import Type from google.cloud.spanner_v1 import TypeCode from google.api_core.retry import Retry @@ -309,7 +310,9 @@ def test_rollback_ok(self): ) def test_commit_not_begun(self): - session = _Session() + database = _Database() + database.spanner_api = self._make_spanner_api() + session = _Session(database) transaction = self._make_one(session) with self.assertRaises(ValueError): transaction.commit() @@ -337,7 +340,9 @@ def test_commit_not_begun(self): assert got_span_events_statuses == want_span_events_statuses def test_commit_already_committed(self): - session = _Session() + database = _Database() + database.spanner_api = self._make_spanner_api() + session = _Session(database) transaction = self._make_one(session) transaction._transaction_id = self.TRANSACTION_ID transaction.committed = object() @@ -367,7 +372,9 @@ def test_commit_already_committed(self): assert got_span_events_statuses == want_span_events_statuses def test_commit_already_rolled_back(self): - session = _Session() + database = _Database() + database.spanner_api = self._make_spanner_api() + session = _Session(database) transaction = self._make_one(session) transaction._transaction_id = self.TRANSACTION_ID transaction.rolled_back = True @@ -1015,6 +1022,7 @@ def __init__(self): self._instance = _Instance() self._route_to_leader_enabled = True self._directed_read_options = None + self.default_transaction_options = DefaultTransactionOptions() class _Session(object):
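Taken together, the Test_merge_transaction_options cases in test__helpers.py specify the precedence rule for transaction options: fields set on the request-level options win over the client-level defaults, except that an unspecified isolation level falls back to the default. A behavioral sketch consistent with those cases (the real helper is _merge_Transaction_Options in google.cloud.spanner_v1._helpers):

from google.cloud.spanner_v1 import TransactionOptions

def merge_transaction_options(default, merge):
    # Nothing to merge when either side is absent.
    if default is None:
        return merge
    if merge is None:
        return default
    # Protobuf MergeFrom keeps the default value wherever `merge` leaves a
    # field unset; ISOLATION_LEVEL_UNSPECIFIED is the zero value, so it
    # inherits the default isolation level automatically.
    result = TransactionOptions()
    result._pb.MergeFrom(default._pb)
    result._pb.MergeFrom(merge._pb)
    return result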