From 6869ed651e41d7a8af046884bc6c792a4177f766 Mon Sep 17 00:00:00 2001
From: Emmanuel T Odeke
Date: Thu, 14 Nov 2024 20:23:28 -0800
Subject: [PATCH 01/13] feat(spanner): implement custom tracer_provider
 injection for opentelemetry traces (#1229)

* all: implement custom tracer_provider injection

An important feature for observability is to allow the injection of a
custom tracer_provider instead of always using the global
tracer_provider, by sending in

    observability_options=dict(
        tracer_provider=tracer_provider,
        enable_extended_tracing=True,
    )

* Address review feedback by attaching observability_options to Client only
* Attach observability_options directly before trace_call
* More reverts for formatting
* Plumb observability_options into _restart_on_unavailable
* completely decouple observability_options from session
* apply SPANNER_ENABLE_EXTENDED_TRACING but in inverse due to compatibility
* Document SPANNER_ENABLE_EXTENDED_TRACING in environment
* Revert a vestige of mock
* tests: add unit test for propagating TracerProvider
* Add preliminary end-to-end test to check for injection of observability_options
* Document default enable_extended_tracing value
* Carve out observability_options test
* Ensure that observability_options test sets up and deletes database
* Ensure instance.create() is invoked in system tests
* Use getattr for mock _Client
* Update with code review suggestions
* Deal with mock.Mock false positives failing tests
* Address review feedback
---
 docs/opentelemetry-tracing.rst              |  25 +++-
 examples/trace.py                           |  11 +-
 .../spanner_v1/_opentelemetry_tracing.py    |  27 +++-
 google/cloud/spanner_v1/batch.py            |  16 ++-
 google/cloud/spanner_v1/client.py           |  21 +++
 google/cloud/spanner_v1/database.py         |  12 ++
 google/cloud/spanner_v1/session.py          |  20 ++-
 google/cloud/spanner_v1/snapshot.py         |  58 ++++++--
 google/cloud/spanner_v1/transaction.py      |  42 +++++-
 tests/system/test_observability_options.py  | 134 ++++++++++++++++++
 10 files changed, 339 insertions(+), 27 deletions(-)
 create mode 100644 tests/system/test_observability_options.py

diff --git a/docs/opentelemetry-tracing.rst b/docs/opentelemetry-tracing.rst
index cb9a2b1350..c715ad58ad 100644
--- a/docs/opentelemetry-tracing.rst
+++ b/docs/opentelemetry-tracing.rst
@@ -25,12 +25,21 @@ We also need to tell OpenTelemetry which exporter to use. To export Spanner trac
     # Create and export one trace every 1000 requests
     sampler = TraceIdRatioBased(1/1000)
-    # Use the default tracer provider
-    trace.set_tracer_provider(TracerProvider(sampler=sampler))
-    trace.get_tracer_provider().add_span_processor(
+    tracer_provider = TracerProvider(sampler=sampler)
+    tracer_provider.add_span_processor(
         # Initialize the cloud tracing exporter
         BatchSpanProcessor(CloudTraceSpanExporter())
     )
+    observability_options = dict(
+        tracer_provider=tracer_provider,
+
+        # extended_tracing defaults to True for legacy reasons, to avoid
+        # breaking changes. You can also override it with the environment
+        # variable SPANNER_ENABLE_EXTENDED_TRACING=false.
+        enable_extended_tracing=False,
+    )
+    spanner_client = spanner.Client(project_id, observability_options=observability_options)

To get more fine-grained traces from gRPC, you can enable gRPC instrumentation as follows

@@ -52,3 +61,13 @@

Generated spanner traces should now be available on `Cloud Trace `_
+
+Annotating spans with SQL
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+By default your spans will be annotated with SQL statements where appropriate, but that can be a PII (Personally Identifiable Information) leak.
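+For example, with extended tracing enabled, the span for a query carries the statement itself in its attributes (illustrative values):
+
+    CloudSpanner.ReadWriteTransaction
+        db.type: spanner
+        db.statement: SELECT email FROM contacts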
Sadly, due to legacy behavior, we cannot simply turn this off by default. However, you can disable it globally by setting the environment variable
+
+    SPANNER_ENABLE_EXTENDED_TRACING=false
+
+or per client, by setting `observability_options.enable_extended_tracing=False` when creating each SpannerClient.
diff --git a/examples/trace.py b/examples/trace.py
index 791b6cd20b..e7659e13e2 100644
--- a/examples/trace.py
+++ b/examples/trace.py
@@ -32,15 +32,18 @@ def main():
     tracer_provider = TracerProvider(sampler=ALWAYS_ON)
     trace_exporter = CloudTraceSpanExporter(project_id=project_id)
     tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
-    trace.set_tracer_provider(tracer_provider)
-    # Retrieve a tracer from the global tracer provider.
-    tracer = tracer_provider.get_tracer('MyApp')

     # Setup the Cloud Spanner Client.
-    spanner_client = spanner.Client(project_id)
+    spanner_client = spanner.Client(
+        project_id,
+        observability_options=dict(tracer_provider=tracer_provider, enable_extended_tracing=True),
+    )
     instance = spanner_client.instance('test-instance')
     database = instance.database('test-db')

+    # Retrieve a tracer from our custom tracer provider.
+    tracer = tracer_provider.get_tracer('MyApp')
+
     # Now run our queries
     with tracer.start_as_current_span('QueryInformationSchema'):
         with database.snapshot() as snapshot:
diff --git a/google/cloud/spanner_v1/_opentelemetry_tracing.py b/google/cloud/spanner_v1/_opentelemetry_tracing.py
index 51501a07a3..feb3b92756 100644
--- a/google/cloud/spanner_v1/_opentelemetry_tracing.py
+++ b/google/cloud/spanner_v1/_opentelemetry_tracing.py
@@ -15,6 +15,7 @@
 """Manages OpenTelemetry trace creation and handling"""

 from contextlib import contextmanager
+import os

 from google.cloud.spanner_v1 import SpannerClient
 from google.cloud.spanner_v1 import gapic_version
@@ -33,6 +34,9 @@

 TRACER_NAME = "cloud.google.com/python/spanner"
 TRACER_VERSION = gapic_version.__version__
+extended_tracing_globally_disabled = (
+    os.getenv("SPANNER_ENABLE_EXTENDED_TRACING", "").lower() == "false"
+)


 def get_tracer(tracer_provider=None):
@@ -51,13 +55,26 @@


 @contextmanager
-def trace_call(name, session, extra_attributes=None):
+def trace_call(name, session, extra_attributes=None, observability_options=None):
     if not HAS_OPENTELEMETRY_INSTALLED or not session:
         # Empty context manager. Users will have to check if the generated value is None or a span
         yield None
         return

-    tracer = get_tracer()
+    tracer_provider = None
+
+    # enable_extended_tracing defaults to True in order to minimize breaking
+    # changes and preserve legacy behavior.
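+    # A tracer_provider supplied through observability_options (below) takes
+    # precedence; otherwise get_tracer() falls back to the global
+    # OpenTelemetry tracer provider.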
+ enable_extended_tracing = True + + if isinstance(observability_options, dict): # Avoid false positives with mock.Mock + tracer_provider = observability_options.get("tracer_provider", None) + enable_extended_tracing = observability_options.get( + "enable_extended_tracing", enable_extended_tracing + ) + + tracer = get_tracer(tracer_provider) # Set base attributes that we know for every trace created attributes = { @@ -72,6 +89,12 @@ def trace_call(name, session, extra_attributes=None): if extra_attributes: attributes.update(extra_attributes) + if extended_tracing_globally_disabled: + enable_extended_tracing = False + + if not enable_extended_tracing: + attributes.pop("db.statement", False) + with tracer.start_as_current_span( name, kind=trace.SpanKind.CLIENT, attributes=attributes ) as span: diff --git a/google/cloud/spanner_v1/batch.py b/google/cloud/spanner_v1/batch.py index e3d681189c..948740d7d4 100644 --- a/google/cloud/spanner_v1/batch.py +++ b/google/cloud/spanner_v1/batch.py @@ -205,7 +205,13 @@ def commit( max_commit_delay=max_commit_delay, request_options=request_options, ) - with trace_call("CloudSpanner.Commit", self._session, trace_attributes): + observability_options = getattr(database, "observability_options", None) + with trace_call( + "CloudSpanner.Commit", + self._session, + trace_attributes, + observability_options=observability_options, + ): method = functools.partial( api.commit, request=request, @@ -318,7 +324,13 @@ def batch_write(self, request_options=None, exclude_txn_from_change_streams=Fals request_options=request_options, exclude_txn_from_change_streams=exclude_txn_from_change_streams, ) - with trace_call("CloudSpanner.BatchWrite", self._session, trace_attributes): + observability_options = getattr(database, "observability_options", None) + with trace_call( + "CloudSpanner.BatchWrite", + self._session, + trace_attributes, + observability_options=observability_options, + ): method = functools.partial( api.batch_write, request=request, diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index f8f3fdb72c..afe6264717 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -126,6 +126,16 @@ class Client(ClientWithProject): for all ReadRequests and ExecuteSqlRequests that indicates which replicas or regions should be used for non-transactional reads or queries. + :type observability_options: dict (str -> any) or None + :param observability_options: (Optional) the configuration to control + the tracer's behavior. + tracer_provider is the injected tracer provider + enable_extended_tracing: :type:boolean when set to true will allow for + spans that issue SQL statements to be annotated with SQL. + Default `True`, please set it to `False` to turn it off + or you can use the environment variable `SPANNER_ENABLE_EXTENDED_TRACING=` + to control it. 
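+           Example (sketch; ``my_tracer_provider`` stands for a TracerProvider
+           that you have configured yourself)::
+
+               client = Client(project, observability_options=dict(
+                   tracer_provider=my_tracer_provider,
+                   enable_extended_tracing=False,
+               ))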
+ :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` """ @@ -146,6 +156,7 @@ def __init__( query_options=None, route_to_leader_enabled=True, directed_read_options=None, + observability_options=None, ): self._emulator_host = _get_spanner_emulator_host() @@ -187,6 +198,7 @@ def __init__( self._route_to_leader_enabled = route_to_leader_enabled self._directed_read_options = directed_read_options + self._observability_options = observability_options @property def credentials(self): @@ -268,6 +280,15 @@ def route_to_leader_enabled(self): """ return self._route_to_leader_enabled + @property + def observability_options(self): + """Getter for observability_options. + + :rtype: dict + :returns: The configured observability_options if set. + """ + return self._observability_options + @property def directed_read_options(self): """Getter for directed_read_options. diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index f6c4ceb667..abddd5d97d 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -718,6 +718,7 @@ def execute_pdml(): method=method, request=request, transaction_selector=txn_selector, + observability_options=self.observability_options, ) result_set = StreamedResultSet(iterator) @@ -1106,6 +1107,17 @@ def set_iam_policy(self, policy): response = api.set_iam_policy(request=request, metadata=metadata) return response + @property + def observability_options(self): + """ + Returns the observability options that you set when creating + the SpannerClient. + """ + if not (self._instance and self._instance._client): + return None + + return getattr(self._instance._client, "observability_options", None) + class BatchCheckout(object): """Context manager for using a batch from a database. 
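The ``observability_options`` property above completes the plumbing from Client to Database. A rough usage sketch (hypothetical instance and database names; ``tp`` stands for a configured TracerProvider):

```
client = Client(project, observability_options=dict(tracer_provider=tp))
db = client.instance("my-instance").database("my-db")
# The database resolves the options through its instance's client:
assert db.observability_options is client.observability_options
# Library internals read them defensively, tolerating mocks:
opts = getattr(db, "observability_options", None)
```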
diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index 28280282f4..6281148590 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -142,7 +142,13 @@ def create(self): if self._labels: request.session.labels = self._labels - with trace_call("CloudSpanner.CreateSession", self, self._labels): + observability_options = getattr(self._database, "observability_options", None) + with trace_call( + "CloudSpanner.CreateSession", + self, + self._labels, + observability_options=observability_options, + ): session_pb = api.create_session( request=request, metadata=metadata, @@ -169,7 +175,10 @@ def exists(self): ) ) - with trace_call("CloudSpanner.GetSession", self) as span: + observability_options = getattr(self._database, "observability_options", None) + with trace_call( + "CloudSpanner.GetSession", self, observability_options=observability_options + ) as span: try: api.get_session(name=self.name, metadata=metadata) if span: @@ -194,7 +203,12 @@ def delete(self): raise ValueError("Session ID not set by back-end") api = self._database.spanner_api metadata = _metadata_with_prefix(self._database.name) - with trace_call("CloudSpanner.DeleteSession", self): + observability_options = getattr(self._database, "observability_options", None) + with trace_call( + "CloudSpanner.DeleteSession", + self, + observability_options=observability_options, + ): api.delete_session(name=self.name, metadata=metadata) def ping(self): diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index 3bc1a746bd..a02776b27c 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -56,6 +56,7 @@ def _restart_on_unavailable( attributes=None, transaction=None, transaction_selector=None, + observability_options=None, ): """Restart iteration after :exc:`.ServiceUnavailable`. 
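In rough pseudocode, the restart behavior implemented here is the resume-token pattern (simplified sketch; the real code also buffers items until a fresh resume token arrives and re-selects the transaction):

```
resume_token = b""
while True:
    try:
        for item in method(request=request):
            resume_token = item.resume_token
            yield item
        break  # stream completed without interruption
    except ServiceUnavailable:
        # Restart the stream from the last checkpoint.
        request.resume_token = resume_token
```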
@@ -84,7 +85,10 @@ def _restart_on_unavailable( ) request.transaction = transaction_selector - with trace_call(trace_name, session, attributes): + + with trace_call( + trace_name, session, attributes, observability_options=observability_options + ): iterator = method(request=request) while True: try: @@ -104,7 +108,12 @@ def _restart_on_unavailable( break except ServiceUnavailable: del item_buffer[:] - with trace_call(trace_name, session, attributes): + with trace_call( + trace_name, + session, + attributes, + observability_options=observability_options, + ): request.resume_token = resume_token if transaction is not None: transaction_selector = transaction._make_txn_selector() @@ -119,7 +128,12 @@ def _restart_on_unavailable( if not resumable_error: raise del item_buffer[:] - with trace_call(trace_name, session, attributes): + with trace_call( + trace_name, + session, + attributes, + observability_options=observability_options, + ): request.resume_token = resume_token if transaction is not None: transaction_selector = transaction._make_txn_selector() @@ -299,6 +313,7 @@ def read( ) trace_attributes = {"table_id": table, "columns": columns} + observability_options = getattr(database, "observability_options", None) if self._transaction_id is None: # lock is added to handle the inline begin for first rpc @@ -310,6 +325,7 @@ def read( self._session, trace_attributes, transaction=self, + observability_options=observability_options, ) self._read_request_count += 1 if self._multi_use: @@ -326,6 +342,7 @@ def read( self._session, trace_attributes, transaction=self, + observability_options=observability_options, ) self._read_request_count += 1 @@ -489,19 +506,35 @@ def execute_sql( ) trace_attributes = {"db.statement": sql} + observability_options = getattr(database, "observability_options", None) if self._transaction_id is None: # lock is added to handle the inline begin for first rpc with self._lock: return self._get_streamed_result_set( - restart, request, trace_attributes, column_info + restart, + request, + trace_attributes, + column_info, + observability_options, ) else: return self._get_streamed_result_set( - restart, request, trace_attributes, column_info + restart, + request, + trace_attributes, + column_info, + observability_options, ) - def _get_streamed_result_set(self, restart, request, trace_attributes, column_info): + def _get_streamed_result_set( + self, + restart, + request, + trace_attributes, + column_info, + observability_options=None, + ): iterator = _restart_on_unavailable( restart, request, @@ -509,6 +542,7 @@ def _get_streamed_result_set(self, restart, request, trace_attributes, column_in self._session, trace_attributes, transaction=self, + observability_options=observability_options, ) self._read_request_count += 1 self._execute_sql_count += 1 @@ -598,7 +632,10 @@ def partition_read( trace_attributes = {"table_id": table, "columns": columns} with trace_call( - "CloudSpanner.PartitionReadOnlyTransaction", self._session, trace_attributes + "CloudSpanner.PartitionReadOnlyTransaction", + self._session, + trace_attributes, + observability_options=getattr(database, "observability_options", None), ): method = functools.partial( api.partition_read, @@ -701,6 +738,7 @@ def partition_query( "CloudSpanner.PartitionReadWriteTransaction", self._session, trace_attributes, + observability_options=getattr(database, "observability_options", None), ): method = functools.partial( api.partition_query, @@ -843,7 +881,11 @@ def begin(self): 
(_metadata_with_leader_aware_routing(database._route_to_leader_enabled)) ) txn_selector = self._make_txn_selector() - with trace_call("CloudSpanner.BeginTransaction", self._session): + with trace_call( + "CloudSpanner.BeginTransaction", + self._session, + observability_options=getattr(database, "observability_options", None), + ): method = functools.partial( api.begin_transaction, session=self._session.name, diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index c872cc380d..beb3e46edb 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -98,7 +98,13 @@ def _make_txn_selector(self): return TransactionSelector(id=self._transaction_id) def _execute_request( - self, method, request, trace_name=None, session=None, attributes=None + self, + method, + request, + trace_name=None, + session=None, + attributes=None, + observability_options=None, ): """Helper method to execute request after fetching transaction selector. @@ -110,7 +116,9 @@ def _execute_request( """ transaction = self._make_txn_selector() request.transaction = transaction - with trace_call(trace_name, session, attributes): + with trace_call( + trace_name, session, attributes, observability_options=observability_options + ): method = functools.partial(method, request=request) response = _retry( method, @@ -147,7 +155,12 @@ def begin(self): read_write=TransactionOptions.ReadWrite(), exclude_txn_from_change_streams=self.exclude_txn_from_change_streams, ) - with trace_call("CloudSpanner.BeginTransaction", self._session): + observability_options = getattr(database, "observability_options", None) + with trace_call( + "CloudSpanner.BeginTransaction", + self._session, + observability_options=observability_options, + ): method = functools.partial( api.begin_transaction, session=self._session.name, @@ -175,7 +188,12 @@ def rollback(self): database._route_to_leader_enabled ) ) - with trace_call("CloudSpanner.Rollback", self._session): + observability_options = getattr(database, "observability_options", None) + with trace_call( + "CloudSpanner.Rollback", + self._session, + observability_options=observability_options, + ): method = functools.partial( api.rollback, session=self._session.name, @@ -248,7 +266,13 @@ def commit( max_commit_delay=max_commit_delay, request_options=request_options, ) - with trace_call("CloudSpanner.Commit", self._session, trace_attributes): + observability_options = getattr(database, "observability_options", None) + with trace_call( + "CloudSpanner.Commit", + self._session, + trace_attributes, + observability_options, + ): method = functools.partial( api.commit, request=request, @@ -362,6 +386,9 @@ def execute_update( # environment-level options default_query_options = database._instance._client._query_options query_options = _merge_query_options(default_query_options, query_options) + observability_options = getattr( + database._instance._client, "observability_options", None + ) if request_options is None: request_options = RequestOptions() @@ -399,6 +426,7 @@ def execute_update( "CloudSpanner.ReadWriteTransaction", self._session, trace_attributes, + observability_options=observability_options, ) # Setting the transaction id because the transaction begin was inlined for first rpc. 
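            # In other words: with an inlined begin, the first RPC both starts
            # the transaction and executes the statement, so the transaction id
            # returned with that first response is captured for reuse by all
            # subsequent requests in the transaction.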
            if (
                self._transaction_id is None
                and self.transaction is not None
                and self.transaction.id is not None
            ):
                self._transaction_id = self.transaction.id
            return response
@@ -415,6 +443,7 @@ def execute_update(
             "CloudSpanner.ReadWriteTransaction",
             self._session,
             trace_attributes,
+            observability_options=observability_options,
         )
         return response.stats.row_count_exact
@@ -481,6 +510,7 @@ def batch_update(
             _metadata_with_leader_aware_routing(database._route_to_leader_enabled)
         )
         api = database.spanner_api
+        observability_options = getattr(database, "observability_options", None)

         seqno, self._execute_sql_count = (
             self._execute_sql_count,
@@ -521,6 +551,7 @@ def batch_update(
             "CloudSpanner.DMLTransaction",
             self._session,
             trace_attributes,
+            observability_options=observability_options,
         )
         # Setting the transaction id because the transaction begin was inlined for first rpc.
         for result_set in response.result_sets:
@@ -538,6 +569,7 @@ def batch_update(
             "CloudSpanner.DMLTransaction",
             self._session,
             trace_attributes,
+            observability_options=observability_options,
         )

         row_counts = [
diff --git a/tests/system/test_observability_options.py b/tests/system/test_observability_options.py
new file mode 100644
index 0000000000..8382255c15
--- /dev/null
+++ b/tests/system/test_observability_options.py
@@ -0,0 +1,134 @@
+# Copyright 2024 Google LLC All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pytest
+
+from . import _helpers
+from google.cloud.spanner_v1 import Client
+
+HAS_OTEL_INSTALLED = False
+
+try:
+    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
+    from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
+        InMemorySpanExporter,
+    )
+    from opentelemetry.sdk.trace import TracerProvider
+    from opentelemetry.sdk.trace.sampling import ALWAYS_ON
+    from opentelemetry import trace
+
+    HAS_OTEL_INSTALLED = True
+except ImportError:
+    pass
+
+
+@pytest.mark.skipif(
+    not HAS_OTEL_INSTALLED, reason="OpenTelemetry is necessary to test traces."
+)
+@pytest.mark.skipif(
+    not _helpers.USE_EMULATOR, reason="Emulator is necessary to test traces."
+) +def test_observability_options_propagation(): + PROJECT = _helpers.EMULATOR_PROJECT + CONFIGURATION_NAME = "config-name" + INSTANCE_ID = _helpers.INSTANCE_ID + DISPLAY_NAME = "display-name" + DATABASE_ID = _helpers.unique_id("temp_db") + NODE_COUNT = 5 + LABELS = {"test": "true"} + + def test_propagation(enable_extended_tracing): + global_tracer_provider = TracerProvider(sampler=ALWAYS_ON) + trace.set_tracer_provider(global_tracer_provider) + global_trace_exporter = InMemorySpanExporter() + global_tracer_provider.add_span_processor( + SimpleSpanProcessor(global_trace_exporter) + ) + + inject_tracer_provider = TracerProvider(sampler=ALWAYS_ON) + inject_trace_exporter = InMemorySpanExporter() + inject_tracer_provider.add_span_processor( + SimpleSpanProcessor(inject_trace_exporter) + ) + observability_options = dict( + tracer_provider=inject_tracer_provider, + enable_extended_tracing=enable_extended_tracing, + ) + client = Client( + project=PROJECT, + observability_options=observability_options, + credentials=_make_credentials(), + ) + + instance = client.instance( + INSTANCE_ID, + CONFIGURATION_NAME, + display_name=DISPLAY_NAME, + node_count=NODE_COUNT, + labels=LABELS, + ) + + try: + instance.create() + except Exception: + pass + + db = instance.database(DATABASE_ID) + try: + db.create() + except Exception: + pass + + assert db.observability_options == observability_options + with db.snapshot() as snapshot: + res = snapshot.execute_sql("SELECT 1") + for val in res: + _ = val + + from_global_spans = global_trace_exporter.get_finished_spans() + from_inject_spans = inject_trace_exporter.get_finished_spans() + assert ( + len(from_global_spans) == 0 + ) # "Expecting no spans from the global trace exporter" + assert ( + len(from_inject_spans) >= 2 + ) # "Expecting at least 2 spans from the injected trace exporter" + gotNames = [span.name for span in from_inject_spans] + wantNames = ["CloudSpanner.CreateSession", "CloudSpanner.ReadWriteTransaction"] + assert gotNames == wantNames + + # Check for conformance of enable_extended_tracing + lastSpan = from_inject_spans[len(from_inject_spans) - 1] + wantAnnotatedSQL = "SELECT 1" + if not enable_extended_tracing: + wantAnnotatedSQL = None + assert ( + lastSpan.attributes.get("db.statement", None) == wantAnnotatedSQL + ) # "Mismatch in annotated sql" + + try: + db.delete() + instance.delete() + except Exception: + pass + + # Test the respective options for enable_extended_tracing + test_propagation(True) + test_propagation(False) + + +def _make_credentials(): + from google.auth.credentials import AnonymousCredentials + + return AnonymousCredentials() From 054a18658eedc5d4dbecb7508baa3f3d67f5b815 Mon Sep 17 00:00:00 2001 From: Sally-Ye Date: Sun, 17 Nov 2024 23:48:38 -0500 Subject: [PATCH 02/13] docs(samples): Add samples for Cloud Spanner Default Backup Schedules (#1238) * chore(samples): Add samples for Cloud Spanner Default Backup Schedules * chore(samples): Add samples for Cloud Spanner Default Backup Schedules Fix field name in code samples. * chore(samples): Add samples for Cloud Spanner Default Backup Schedules Fix field name in code samples. 
--------- Co-authored-by: Sri Harsha CH <57220027+harshachinta@users.noreply.github.com> --- samples/samples/requirements.txt | 2 +- samples/samples/snippets.py | 51 ++++++++++++++++++++++++++++++++ samples/samples/snippets_test.py | 19 ++++++++++++ 3 files changed, 71 insertions(+), 1 deletion(-) diff --git a/samples/samples/requirements.txt b/samples/samples/requirements.txt index 5a108d39ef..4009a0a00b 100644 --- a/samples/samples/requirements.txt +++ b/samples/samples/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-spanner==3.49.1 +google-cloud-spanner==3.50.0 futures==3.4.0; python_version < "3" diff --git a/samples/samples/snippets.py b/samples/samples/snippets.py index c958a66822..6650ebe88d 100644 --- a/samples/samples/snippets.py +++ b/samples/samples/snippets.py @@ -3222,6 +3222,57 @@ def create_instance_with_autoscaling_config(instance_id): # [END spanner_create_instance_with_autoscaling_config] +# [START spanner_create_instance_without_default_backup_schedule] +def create_instance_without_default_backup_schedules(instance_id): + spanner_client = spanner.Client() + config_name = "{}/instanceConfigs/regional-me-central2".format( + spanner_client.project_name + ) + + operation = spanner_client.instance_admin_api.create_instance( + parent=spanner_client.project_name, + instance_id=instance_id, + instance=spanner_instance_admin.Instance( + config=config_name, + display_name="This is a display name.", + node_count=1, + default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.NONE, # Optional + ), + ) + + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + + print("Created instance {} without default backup schedules".format(instance_id)) + + +# [END spanner_create_instance_without_default_backup_schedule] + + +# [START spanner_update_instance_default_backup_schedule_type] +def update_instance_default_backup_schedule_type(instance_id): + spanner_client = spanner.Client() + + name = "{}/instances/{}".format(spanner_client.project_name, instance_id) + + operation = spanner_client.instance_admin_api.update_instance( + instance=spanner_instance_admin.Instance( + name=name, + default_backup_schedule_type=spanner_instance_admin.Instance.DefaultBackupScheduleType.AUTOMATIC, # Optional + ), + field_mask=field_mask_pb2.FieldMask( + paths=["default_backup_schedule_type"] + ), + ) + + print("Waiting for operation to complete...") + operation.result(OPERATION_TIMEOUT_SECONDS) + + print("Updated instance {} to have default backup schedules".format(instance_id)) + +# [END spanner_update_instance_default_backup_schedule_type] + + def add_proto_type_columns(instance_id, database_id): # [START spanner_add_proto_type_columns] # instance_id = "your-spanner-instance" diff --git a/samples/samples/snippets_test.py b/samples/samples/snippets_test.py index ba3c0bbfe7..87fa7a43a2 100644 --- a/samples/samples/snippets_test.py +++ b/samples/samples/snippets_test.py @@ -197,6 +197,25 @@ def test_create_instance_with_autoscaling_config(capsys, lci_instance_id): retry_429(instance.delete)() +def test_create_and_update_instance_default_backup_schedule_type(capsys, lci_instance_id): + retry_429(snippets.create_instance_without_default_backup_schedules)( + lci_instance_id, + ) + create_out, _ = capsys.readouterr() + assert lci_instance_id in create_out + assert "without default backup schedules" in create_out + + retry_429(snippets.update_instance_default_backup_schedule_type)( + lci_instance_id, + ) + update_out, _ = capsys.readouterr() + assert 
lci_instance_id in update_out + assert "to have default backup schedules" in update_out + spanner_client = spanner.Client() + instance = spanner_client.instance(lci_instance_id) + retry_429(instance.delete)() + + def test_create_instance_partition(capsys, instance_partition_instance_id): # Unable to use create_instance since it has editions set where partitions are unsupported. # The minimal requirement for editions is ENTERPRISE_PLUS for the paritions to get supported. From ccae6e0287ba6cf3c14f15a907b2106b11ef1fdc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Mon, 2 Dec 2024 11:33:24 +0100 Subject: [PATCH 03/13] perf: optimize ResultSet decoding (#1244) * perf: optimize ResultSet decoding ResultSet decoding went through a long if-elif-else construct for every row and every column to determine how to decode that specific cell. This caused large result sets to see a significantly higher decoding time than necessary, as determining how to decode a column only needs to be determined once for the entire ResultSet. This change therefore collects the decoders once before starting to decode any rows. It does this by: 1. Iterating over the columns in the ResultSet and get a decoder for the specific type of that column. 2. Store those decoders as function references in an array. 3. Pick the appropriate function directly from this array each time a column needs to be decoded. Selecting and decoding a query result with 100 rows consisting of 24 columns (one for each supported data type) takes ~35-40ms without this change, and ~18-20ms with this change. The following benchmarks were executed locally against an in-mem mock Spanner server running in Java. The latter was chosen because: 1. We have a random ResultSet generator in Java that can be used for this. 2. Having the mock Spanner server running in a separate process and in another programming language reduces the chance that the mock server itself has an impact on the differences that we see between the different runs. 
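Conceptually, the decoding loop changes from per-cell type dispatch to a per-column decoder table, roughly (illustrative sketch using the helper names introduced below, not the actual code):

```
# Resolve one decoder per column, once per result set...
decoders = [
    _get_type_decoder(field.type_, field.name) for field in metadata.row_type.fields
]
# ...then apply them row by row without re-inspecting the type code.
for row in raw_rows:
    yield [_parse_nullable(value, decoders[i]) for i, value in enumerate(row)]
```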
Results without this change (100 iterations): ``` Elapsed: 43.5490608215332 ms Elapsed: 39.53838348388672 ms Elapsed: 38.68389129638672 ms Elapsed: 38.26117515563965 ms Elapsed: 38.28692436218262 ms Elapsed: 38.12098503112793 ms Elapsed: 39.016008377075195 ms Elapsed: 38.15174102783203 ms Elapsed: 38.3448600769043 ms Elapsed: 38.00082206726074 ms Elapsed: 38.0091667175293 ms Elapsed: 38.02800178527832 ms Elapsed: 38.03110122680664 ms Elapsed: 38.42306137084961 ms Elapsed: 38.535356521606445 ms Elapsed: 38.86699676513672 ms Elapsed: 38.702964782714844 ms Elapsed: 38.881778717041016 ms Elapsed: 38.08116912841797 ms Elapsed: 38.084983825683594 ms Elapsed: 38.04278373718262 ms Elapsed: 38.74492645263672 ms Elapsed: 38.57111930847168 ms Elapsed: 38.17009925842285 ms Elapsed: 38.64407539367676 ms Elapsed: 38.00559043884277 ms Elapsed: 38.06161880493164 ms Elapsed: 38.233280181884766 ms Elapsed: 38.48695755004883 ms Elapsed: 38.71011734008789 ms Elapsed: 37.92428970336914 ms Elapsed: 38.8491153717041 ms Elapsed: 38.90705108642578 ms Elapsed: 38.20919990539551 ms Elapsed: 38.07401657104492 ms Elapsed: 38.30099105834961 ms Elapsed: 38.07377815246582 ms Elapsed: 38.61117362976074 ms Elapsed: 39.58392143249512 ms Elapsed: 39.69216346740723 ms Elapsed: 38.27810287475586 ms Elapsed: 37.88185119628906 ms Elapsed: 38.763999938964844 ms Elapsed: 39.05320167541504 ms Elapsed: 38.82408142089844 ms Elapsed: 38.47217559814453 ms Elapsed: 38.024187088012695 ms Elapsed: 38.07687759399414 ms Elapsed: 38.11931610107422 ms Elapsed: 37.9488468170166 ms Elapsed: 38.04421424865723 ms Elapsed: 38.57421875 ms Elapsed: 39.543867111206055 ms Elapsed: 38.4981632232666 ms Elapsed: 37.89806365966797 ms Elapsed: 38.0861759185791 ms Elapsed: 38.72990608215332 ms Elapsed: 38.47217559814453 ms Elapsed: 38.71774673461914 ms Elapsed: 38.27619552612305 ms Elapsed: 38.08403015136719 ms Elapsed: 38.6350154876709 ms Elapsed: 38.03229331970215 ms Elapsed: 39.01100158691406 ms Elapsed: 38.4981632232666 ms Elapsed: 38.25807571411133 ms Elapsed: 38.59400749206543 ms Elapsed: 38.83624076843262 ms Elapsed: 38.584232330322266 ms Elapsed: 39.54625129699707 ms Elapsed: 38.268089294433594 ms Elapsed: 39.3218994140625 ms Elapsed: 37.9948616027832 ms Elapsed: 38.05804252624512 ms Elapsed: 38.88821601867676 ms Elapsed: 38.08021545410156 ms Elapsed: 38.22588920593262 ms Elapsed: 37.97507286071777 ms Elapsed: 38.03110122680664 ms Elapsed: 37.91308403015137 ms Elapsed: 38.00201416015625 ms Elapsed: 38.529157638549805 ms Elapsed: 38.44308853149414 ms Elapsed: 38.87534141540527 ms Elapsed: 38.85912895202637 ms Elapsed: 38.48695755004883 ms Elapsed: 38.41686248779297 ms Elapsed: 38.10882568359375 ms Elapsed: 37.98198699951172 ms Elapsed: 38.50507736206055 ms Elapsed: 38.16986083984375 ms Elapsed: 38.07711601257324 ms Elapsed: 37.92715072631836 ms Elapsed: 37.93692588806152 ms Elapsed: 38.04588317871094 ms Elapsed: 38.62190246582031 ms Elapsed: 38.5129451751709 ms Elapsed: 37.960052490234375 ms Elapsed: 37.99295425415039 ms Elapsed: 38.45930099487305 ms ``` Results with this change: ``` Elapsed: 21.09503746032715 ms Elapsed: 17.00878143310547 ms Elapsed: 17.43626594543457 ms Elapsed: 16.201019287109375 ms Elapsed: 16.66712760925293 ms Elapsed: 15.926837921142578 ms Elapsed: 16.408205032348633 ms Elapsed: 16.13783836364746 ms Elapsed: 16.27206802368164 ms Elapsed: 17.15087890625 ms Elapsed: 16.06607437133789 ms Elapsed: 16.852855682373047 ms Elapsed: 23.713111877441406 ms Elapsed: 17.20905303955078 ms Elapsed: 16.60609245300293 ms Elapsed: 
16.30997657775879 ms Elapsed: 15.933990478515625 ms Elapsed: 15.688180923461914 ms Elapsed: 16.228914260864258 ms Elapsed: 16.252994537353516 ms Elapsed: 16.33000373840332 ms Elapsed: 15.842676162719727 ms Elapsed: 16.328096389770508 ms Elapsed: 16.4949893951416 ms Elapsed: 16.47210121154785 ms Elapsed: 16.674041748046875 ms Elapsed: 15.768766403198242 ms Elapsed: 16.48569107055664 ms Elapsed: 15.876054763793945 ms Elapsed: 16.852140426635742 ms Elapsed: 16.035079956054688 ms Elapsed: 16.407012939453125 ms Elapsed: 15.882015228271484 ms Elapsed: 16.71886444091797 ms Elapsed: 15.86294174194336 ms Elapsed: 16.566038131713867 ms Elapsed: 15.904903411865234 ms Elapsed: 16.289234161376953 ms Elapsed: 16.14999771118164 ms Elapsed: 16.31784439086914 ms Elapsed: 16.106843948364258 ms Elapsed: 16.581058502197266 ms Elapsed: 16.435861587524414 ms Elapsed: 15.904903411865234 ms Elapsed: 16.408205032348633 ms Elapsed: 16.062021255493164 ms Elapsed: 16.256093978881836 ms Elapsed: 15.87367057800293 ms Elapsed: 16.23702049255371 ms Elapsed: 16.745805740356445 ms Elapsed: 15.92707633972168 ms Elapsed: 16.142845153808594 ms Elapsed: 16.492843627929688 ms Elapsed: 21.553754806518555 ms Elapsed: 17.05002784729004 ms Elapsed: 16.932964324951172 ms Elapsed: 16.810894012451172 ms Elapsed: 16.577720642089844 ms Elapsed: 15.714168548583984 ms Elapsed: 16.2351131439209 ms Elapsed: 16.072988510131836 ms Elapsed: 16.038894653320312 ms Elapsed: 16.055822372436523 ms Elapsed: 16.378164291381836 ms Elapsed: 15.806913375854492 ms Elapsed: 15.5792236328125 ms Elapsed: 15.954732894897461 ms Elapsed: 15.566825866699219 ms Elapsed: 15.707969665527344 ms Elapsed: 15.514135360717773 ms Elapsed: 15.43116569519043 ms Elapsed: 15.332937240600586 ms Elapsed: 15.470027923583984 ms Elapsed: 15.269756317138672 ms Elapsed: 15.250921249389648 ms Elapsed: 15.47694206237793 ms Elapsed: 15.306949615478516 ms Elapsed: 15.72728157043457 ms Elapsed: 15.938043594360352 ms Elapsed: 16.324996948242188 ms Elapsed: 16.198158264160156 ms Elapsed: 15.982627868652344 ms Elapsed: 16.308069229125977 ms Elapsed: 17.843246459960938 ms Elapsed: 15.820026397705078 ms Elapsed: 16.428232192993164 ms Elapsed: 15.978097915649414 ms Elapsed: 16.347885131835938 ms Elapsed: 16.026020050048828 ms Elapsed: 16.362905502319336 ms Elapsed: 16.900062561035156 ms Elapsed: 17.3337459564209 ms Elapsed: 17.65608787536621 ms Elapsed: 20.101070404052734 ms Elapsed: 18.137216567993164 ms Elapsed: 16.952991485595703 ms Elapsed: 16.7691707611084 ms Elapsed: 16.71290397644043 ms Elapsed: 16.3421630859375 ms Elapsed: 16.36195182800293 ms ``` * chore: remove unused field_types variable --- google/cloud/spanner_v1/_helpers.py | 178 ++++++++++++++++++++-------- google/cloud/spanner_v1/snapshot.py | 54 ++++++++- google/cloud/spanner_v1/streamed.py | 65 ++++++++-- tests/system/test_session_api.py | 42 ++++--- 4 files changed, 260 insertions(+), 79 deletions(-) diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py index a1d6a60cb0..a4d66fc20f 100644 --- a/google/cloud/spanner_v1/_helpers.py +++ b/google/cloud/spanner_v1/_helpers.py @@ -266,66 +266,69 @@ def _parse_value_pb(value_pb, field_type, field_name, column_info=None): :returns: value extracted from value_pb :raises ValueError: if unknown type is passed """ + decoder = _get_type_decoder(field_type, field_name, column_info) + return _parse_nullable(value_pb, decoder) + + +def _get_type_decoder(field_type, field_name, column_info=None): + """Returns a function that converts a Value protobuf to 
cell data. + + :type field_type: :class:`~google.cloud.spanner_v1.types.Type` + :param field_type: type code for the value + + :type field_name: str + :param field_name: column name + + :type column_info: dict + :param column_info: (Optional) dict of column name and column information. + An object where column names as keys and custom objects as corresponding + values for deserialization. It's specifically useful for data types like + protobuf where deserialization logic is on user-specific code. When provided, + the custom object enables deserialization of backend-received column data. + If not provided, data remains serialized as bytes for Proto Messages and + integer for Proto Enums. + + :rtype: a function that takes a single protobuf value as an input argument + :returns: a function that can be used to extract a value from a protobuf value + :raises ValueError: if unknown type is passed + """ + type_code = field_type.code - if value_pb.HasField("null_value"): - return None if type_code == TypeCode.STRING: - return value_pb.string_value + return _parse_string elif type_code == TypeCode.BYTES: - return value_pb.string_value.encode("utf8") + return _parse_bytes elif type_code == TypeCode.BOOL: - return value_pb.bool_value + return _parse_bool elif type_code == TypeCode.INT64: - return int(value_pb.string_value) + return _parse_int64 elif type_code == TypeCode.FLOAT64: - if value_pb.HasField("string_value"): - return float(value_pb.string_value) - else: - return value_pb.number_value + return _parse_float elif type_code == TypeCode.FLOAT32: - if value_pb.HasField("string_value"): - return float(value_pb.string_value) - else: - return value_pb.number_value + return _parse_float elif type_code == TypeCode.DATE: - return _date_from_iso8601_date(value_pb.string_value) + return _parse_date elif type_code == TypeCode.TIMESTAMP: - DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds - return DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value) - elif type_code == TypeCode.ARRAY: - return [ - _parse_value_pb( - item_pb, field_type.array_element_type, field_name, column_info - ) - for item_pb in value_pb.list_value.values - ] - elif type_code == TypeCode.STRUCT: - return [ - _parse_value_pb( - item_pb, field_type.struct_type.fields[i].type_, field_name, column_info - ) - for (i, item_pb) in enumerate(value_pb.list_value.values) - ] + return _parse_timestamp elif type_code == TypeCode.NUMERIC: - return decimal.Decimal(value_pb.string_value) + return _parse_numeric elif type_code == TypeCode.JSON: - return JsonObject.from_str(value_pb.string_value) + return _parse_json elif type_code == TypeCode.PROTO: - bytes_value = base64.b64decode(value_pb.string_value) - if column_info is not None and column_info.get(field_name) is not None: - default_proto_message = column_info.get(field_name) - if isinstance(default_proto_message, Message): - proto_message = type(default_proto_message)() - proto_message.ParseFromString(bytes_value) - return proto_message - return bytes_value + return lambda value_pb: _parse_proto(value_pb, column_info, field_name) elif type_code == TypeCode.ENUM: - int_value = int(value_pb.string_value) - if column_info is not None and column_info.get(field_name) is not None: - proto_enum = column_info.get(field_name) - if isinstance(proto_enum, EnumTypeWrapper): - return proto_enum.Name(int_value) - return int_value + return lambda value_pb: _parse_proto_enum(value_pb, column_info, field_name) + elif type_code == TypeCode.ARRAY: + element_decoder = _get_type_decoder( + 
field_type.array_element_type, field_name, column_info + ) + return lambda value_pb: _parse_array(value_pb, element_decoder) + elif type_code == TypeCode.STRUCT: + element_decoders = [ + _get_type_decoder(item_field.type_, field_name, column_info) + for item_field in field_type.struct_type.fields + ] + return lambda value_pb: _parse_struct(value_pb, element_decoders) else: raise ValueError("Unknown type: %s" % (field_type,)) @@ -351,6 +354,87 @@ def _parse_list_value_pbs(rows, row_type): return result +def _parse_string(value_pb) -> str: + return value_pb.string_value + + +def _parse_bytes(value_pb): + return value_pb.string_value.encode("utf8") + + +def _parse_bool(value_pb) -> bool: + return value_pb.bool_value + + +def _parse_int64(value_pb) -> int: + return int(value_pb.string_value) + + +def _parse_float(value_pb) -> float: + if value_pb.HasField("string_value"): + return float(value_pb.string_value) + else: + return value_pb.number_value + + +def _parse_date(value_pb): + return _date_from_iso8601_date(value_pb.string_value) + + +def _parse_timestamp(value_pb): + DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds + return DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value) + + +def _parse_numeric(value_pb): + return decimal.Decimal(value_pb.string_value) + + +def _parse_json(value_pb): + return JsonObject.from_str(value_pb.string_value) + + +def _parse_proto(value_pb, column_info, field_name): + bytes_value = base64.b64decode(value_pb.string_value) + if column_info is not None and column_info.get(field_name) is not None: + default_proto_message = column_info.get(field_name) + if isinstance(default_proto_message, Message): + proto_message = type(default_proto_message)() + proto_message.ParseFromString(bytes_value) + return proto_message + return bytes_value + + +def _parse_proto_enum(value_pb, column_info, field_name): + int_value = int(value_pb.string_value) + if column_info is not None and column_info.get(field_name) is not None: + proto_enum = column_info.get(field_name) + if isinstance(proto_enum, EnumTypeWrapper): + return proto_enum.Name(int_value) + return int_value + + +def _parse_array(value_pb, element_decoder) -> []: + return [ + _parse_nullable(item_pb, element_decoder) + for item_pb in value_pb.list_value.values + ] + + +def _parse_struct(value_pb, element_decoders): + return [ + _parse_nullable(item_pb, element_decoders[i]) + for (i, item_pb) in enumerate(value_pb.list_value.values) + ] + + +def _parse_nullable(value_pb, decoder): + if value_pb.HasField("null_value"): + return None + else: + return decoder(value_pb) + + class _SessionWrapper(object): """Base class for objects wrapping a session. diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index a02776b27c..143e17c503 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -192,6 +192,7 @@ def read( retry=gapic_v1.method.DEFAULT, timeout=gapic_v1.method.DEFAULT, column_info=None, + lazy_decode=False, ): """Perform a ``StreamingRead`` API request for rows in a table. @@ -255,6 +256,18 @@ def read( If not provided, data remains serialized as bytes for Proto Messages and integer for Proto Enums. + :type lazy_decode: bool + :param lazy_decode: + (Optional) If this argument is set to ``true``, the iterator + returns the underlying protobuf values instead of decoded Python + objects. This reduces the time that is needed to iterate through + large result sets. 
The application is responsible for decoding + the data that is needed. The returned row iterator contains two + functions that can be used for this. ``iterator.decode_row(row)`` + decodes all the columns in the given row to an array of Python + objects. ``iterator.decode_column(row, column_index)`` decodes one + specific column in the given row. + :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. @@ -330,10 +343,15 @@ def read( self._read_request_count += 1 if self._multi_use: return StreamedResultSet( - iterator, source=self, column_info=column_info + iterator, + source=self, + column_info=column_info, + lazy_decode=lazy_decode, ) else: - return StreamedResultSet(iterator, column_info=column_info) + return StreamedResultSet( + iterator, column_info=column_info, lazy_decode=lazy_decode + ) else: iterator = _restart_on_unavailable( restart, @@ -348,9 +366,13 @@ def read( self._read_request_count += 1 if self._multi_use: - return StreamedResultSet(iterator, source=self, column_info=column_info) + return StreamedResultSet( + iterator, source=self, column_info=column_info, lazy_decode=lazy_decode + ) else: - return StreamedResultSet(iterator, column_info=column_info) + return StreamedResultSet( + iterator, column_info=column_info, lazy_decode=lazy_decode + ) def execute_sql( self, @@ -366,6 +388,7 @@ def execute_sql( data_boost_enabled=False, directed_read_options=None, column_info=None, + lazy_decode=False, ): """Perform an ``ExecuteStreamingSql`` API request. @@ -438,6 +461,18 @@ def execute_sql( If not provided, data remains serialized as bytes for Proto Messages and integer for Proto Enums. + :type lazy_decode: bool + :param lazy_decode: + (Optional) If this argument is set to ``true``, the iterator + returns the underlying protobuf values instead of decoded Python + objects. This reduces the time that is needed to iterate through + large result sets. The application is responsible for decoding + the data that is needed. The returned row iterator contains two + functions that can be used for this. ``iterator.decode_row(row)`` + decodes all the columns in the given row to an array of Python + objects. ``iterator.decode_column(row, column_index)`` decodes one + specific column in the given row. + :raises ValueError: for reuse of single-use snapshots, or if a transaction ID is already pending for multiple-use snapshots. 
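As a usage sketch (assuming an open ``snapshot``), a caller can defer decoding until a row is actually needed:

```
results = snapshot.execute_sql("SELECT * FROM my_table", lazy_decode=True)
for row in results:
    # `row` still holds raw protobuf values here.
    first_column = results.decode_column(row, 0)  # decode one column...
    full_row = results.decode_row(row)            # ...or the whole row
```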
@@ -517,6 +552,7 @@ def execute_sql( trace_attributes, column_info, observability_options, + lazy_decode=lazy_decode, ) else: return self._get_streamed_result_set( @@ -525,6 +561,7 @@ def execute_sql( trace_attributes, column_info, observability_options, + lazy_decode=lazy_decode, ) def _get_streamed_result_set( @@ -534,6 +571,7 @@ def _get_streamed_result_set( trace_attributes, column_info, observability_options=None, + lazy_decode=False, ): iterator = _restart_on_unavailable( restart, @@ -548,9 +586,13 @@ def _get_streamed_result_set( self._execute_sql_count += 1 if self._multi_use: - return StreamedResultSet(iterator, source=self, column_info=column_info) + return StreamedResultSet( + iterator, source=self, column_info=column_info, lazy_decode=lazy_decode + ) else: - return StreamedResultSet(iterator, column_info=column_info) + return StreamedResultSet( + iterator, column_info=column_info, lazy_decode=lazy_decode + ) def partition_read( self, diff --git a/google/cloud/spanner_v1/streamed.py b/google/cloud/spanner_v1/streamed.py index 89bde0e334..7c067e97b6 100644 --- a/google/cloud/spanner_v1/streamed.py +++ b/google/cloud/spanner_v1/streamed.py @@ -21,7 +21,7 @@ from google.cloud.spanner_v1 import PartialResultSet from google.cloud.spanner_v1 import ResultSetMetadata from google.cloud.spanner_v1 import TypeCode -from google.cloud.spanner_v1._helpers import _parse_value_pb +from google.cloud.spanner_v1._helpers import _get_type_decoder, _parse_nullable class StreamedResultSet(object): @@ -37,7 +37,13 @@ class StreamedResultSet(object): :param source: Snapshot from which the result set was fetched. """ - def __init__(self, response_iterator, source=None, column_info=None): + def __init__( + self, + response_iterator, + source=None, + column_info=None, + lazy_decode: bool = False, + ): self._response_iterator = response_iterator self._rows = [] # Fully-processed rows self._metadata = None # Until set from first PRS @@ -46,6 +52,8 @@ def __init__(self, response_iterator, source=None, column_info=None): self._pending_chunk = None # Incomplete value self._source = source # Source snapshot self._column_info = column_info # Column information + self._field_decoders = None + self._lazy_decode = lazy_decode # Return protobuf values @property def fields(self): @@ -77,6 +85,17 @@ def stats(self): """ return self._stats + @property + def _decoders(self): + if self._field_decoders is None: + if self._metadata is None: + raise ValueError("iterator not started") + self._field_decoders = [ + _get_type_decoder(field.type_, field.name, self._column_info) + for field in self.fields + ] + return self._field_decoders + def _merge_chunk(self, value): """Merge pending chunk with next value. @@ -99,16 +118,14 @@ def _merge_values(self, values): :type values: list of :class:`~google.protobuf.struct_pb2.Value` :param values: non-chunked values from partial result set. 
""" - field_types = [field.type_ for field in self.fields] - field_names = [field.name for field in self.fields] - width = len(field_types) + decoders = self._decoders + width = len(self.fields) index = len(self._current_row) for value in values: - self._current_row.append( - _parse_value_pb( - value, field_types[index], field_names[index], self._column_info - ) - ) + if self._lazy_decode: + self._current_row.append(value) + else: + self._current_row.append(_parse_nullable(value, decoders[index])) index += 1 if index == width: self._rows.append(self._current_row) @@ -152,6 +169,34 @@ def __iter__(self): except StopIteration: return + def decode_row(self, row: []) -> []: + """Decodes a row from protobuf values to Python objects. This function + should only be called for result sets that use ``lazy_decoding=True``. + The array that is returned by this function is the same as the array + that would have been returned by the rows iterator if ``lazy_decoding=False``. + + :returns: an array containing the decoded values of all the columns in the given row + """ + if not hasattr(row, "__len__"): + raise TypeError("row", "row must be an array of protobuf values") + decoders = self._decoders + return [ + _parse_nullable(row[index], decoders[index]) for index in range(len(row)) + ] + + def decode_column(self, row: [], column_index: int): + """Decodes a column from a protobuf value to a Python object. This function + should only be called for result sets that use ``lazy_decoding=True``. + The object that is returned by this function is the same as the object + that would have been returned by the rows iterator if ``lazy_decoding=False``. + + :returns: the decoded column value + """ + if not hasattr(row, "__len__"): + raise TypeError("row", "row must be an array of protobuf values") + decoders = self._decoders + return _parse_nullable(row[column_index], decoders[column_index]) + def one(self): """Return exactly one result, or raise an exception. 
diff --git a/tests/system/test_session_api.py b/tests/system/test_session_api.py
index 5322527d12..b7337cb258 100644
--- a/tests/system/test_session_api.py
+++ b/tests/system/test_session_api.py
@@ -2018,17 +2018,20 @@ def test_execute_sql_w_manual_consume(sessions_database):
     row_count = 3000
     committed = _set_up_table(sessions_database, row_count)

-    with sessions_database.snapshot(read_timestamp=committed) as snapshot:
-        streamed = snapshot.execute_sql(sd.SQL)
+    for lazy_decode in [False, True]:
+        with sessions_database.snapshot(read_timestamp=committed) as snapshot:
+            streamed = snapshot.execute_sql(sd.SQL, lazy_decode=lazy_decode)

-    keyset = spanner_v1.KeySet(all_=True)
+        keyset = spanner_v1.KeySet(all_=True)

-    with sessions_database.snapshot(read_timestamp=committed) as snapshot:
-        rows = list(snapshot.read(sd.TABLE, sd.COLUMNS, keyset))
+        with sessions_database.snapshot(read_timestamp=committed) as snapshot:
+            rows = list(
+                snapshot.read(sd.TABLE, sd.COLUMNS, keyset, lazy_decode=lazy_decode)
+            )

-    assert list(streamed) == rows
-    assert streamed._current_row == []
-    assert streamed._pending_chunk is None
+        assert list(streamed) == rows
+        assert streamed._current_row == []
+        assert streamed._pending_chunk is None


 def test_execute_sql_w_to_dict_list(sessions_database):
@@ -2057,16 +2060,23 @@ def _check_sql_results(
     if order and "ORDER" not in sql:
         sql += " ORDER BY pkey"

-    with database.snapshot() as snapshot:
-        rows = list(
-            snapshot.execute_sql(
-                sql, params=params, param_types=param_types, column_info=column_info
+    for lazy_decode in [False, True]:
+        with database.snapshot() as snapshot:
+            iterator = snapshot.execute_sql(
+                sql,
+                params=params,
+                param_types=param_types,
+                column_info=column_info,
+                lazy_decode=lazy_decode,
             )
-        )
+            rows = list(iterator)
+            if lazy_decode:
+                for index, row in enumerate(rows):
+                    rows[index] = iterator.decode_row(row)

-    _sample_data._check_rows_data(
-        rows, expected=expected, recurse_into_lists=recurse_into_lists
-    )
+            _sample_data._check_rows_data(
+                rows, expected=expected, recurse_into_lists=recurse_into_lists
+            )


 def test_multiuse_snapshot_execute_sql_isolation_strong(sessions_database):

From eeb7836b6350aa9626dfb733208e6827d38bb9c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?=
Date: Wed, 4 Dec 2024 12:30:04 +0100
Subject: [PATCH 04/13] feat: add connection variable for ignoring transaction
 warnings (#1249)

Adds a connection variable for ignoring transaction warnings. Also adds
a **kwargs argument to the connect function. This will be used for
further connection variables in the future.

Fixes https://github.com/googleapis/python-spanner-sqlalchemy/issues/494
---
 google/cloud/spanner_dbapi/connection.py    | 26 +++++++++++++------
 tests/unit/spanner_dbapi/test_connection.py | 13 +++++++++++
 2 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/google/cloud/spanner_dbapi/connection.py b/google/cloud/spanner_dbapi/connection.py
index b02d62ea27..65afcd4a2a 100644
--- a/google/cloud/spanner_dbapi/connection.py
+++ b/google/cloud/spanner_dbapi/connection.py
@@ -89,9 +89,11 @@ class Connection:
        committed by other transactions since the start of the read-only
        transaction. Committing or rolling back the read-only transaction is
        semantically the same, and only indicates that the read-only
        transaction should end and that a new one should be started when the
        next statement is executed.
+
+    **kwargs: Initial value for connection variables.
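+
+        For example (sketch), passing ``ignore_transaction_warnings=True``
+        suppresses the warning that commit() and rollback() otherwise emit
+        when no client transaction has been started.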
""" - def __init__(self, instance, database=None, read_only=False): + def __init__(self, instance, database=None, read_only=False, **kwargs): self._instance = instance self._database = database self._ddl_statements = [] @@ -117,6 +119,7 @@ def __init__(self, instance, database=None, read_only=False): self._batch_dml_executor: BatchDmlExecutor = None self._transaction_helper = TransactionRetryHelper(self) self._autocommit_dml_mode: AutocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL + self._connection_variables = kwargs @property def spanner_client(self): @@ -206,6 +209,10 @@ def _client_transaction_started(self): """ return (not self._autocommit) or self._transaction_begin_marked + @property + def _ignore_transaction_warnings(self): + return self._connection_variables.get("ignore_transaction_warnings", False) + @property def instance(self): """Instance to which this connection relates. @@ -398,9 +405,10 @@ def commit(self): if self.database is None: raise ValueError("Database needs to be passed for this operation") if not self._client_transaction_started: - warnings.warn( - CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 - ) + if not self._ignore_transaction_warnings: + warnings.warn( + CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 + ) return self.run_prior_DDL_statements() @@ -418,9 +426,10 @@ def rollback(self): This is a no-op if there is no active client transaction. """ if not self._client_transaction_started: - warnings.warn( - CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 - ) + if not self._ignore_transaction_warnings: + warnings.warn( + CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 + ) return try: if self._spanner_transaction_started and not self._read_only: @@ -654,6 +663,7 @@ def connect( user_agent=None, client=None, route_to_leader_enabled=True, + **kwargs, ): """Creates a connection to a Google Cloud Spanner database. @@ -696,6 +706,8 @@ def connect( disable leader aware routing. Disabling leader aware routing would route all requests in RW/PDML transactions to the closest region. + **kwargs: Initial value for connection variables. + :rtype: :class:`google.cloud.spanner_dbapi.connection.Connection` :returns: Connection object associated with the given Google Cloud Spanner diff --git a/tests/unit/spanner_dbapi/test_connection.py b/tests/unit/spanner_dbapi/test_connection.py index d0fa521f8f..62867bbd2e 100644 --- a/tests/unit/spanner_dbapi/test_connection.py +++ b/tests/unit/spanner_dbapi/test_connection.py @@ -300,6 +300,19 @@ def test_commit_in_autocommit_mode(self, mock_warn): CLIENT_TRANSACTION_NOT_STARTED_WARNING, UserWarning, stacklevel=2 ) + @mock.patch.object(warnings, "warn") + def test_commit_in_autocommit_mode_with_ignore_warnings(self, mock_warn): + conn = self._make_connection( + DatabaseDialect.DATABASE_DIALECT_UNSPECIFIED, + ignore_transaction_warnings=True, + ) + assert conn._ignore_transaction_warnings + conn._autocommit = True + + conn.commit() + + assert not mock_warn.warn.called + def test_commit_database_error(self): from google.cloud.spanner_dbapi import Connection From 5e8ca949b583fbcf0b92b42696545973aad8c78f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Wed, 4 Dec 2024 13:57:15 +0100 Subject: [PATCH 05/13] fix: allow setting connection.read_only to same value (#1247) Setting the read_only value of a connection to the same value as the current value should be allowed during a transaction, as it does not change anything. 
SQLAlchemy regularly does this if engine options have been specified. Fixes https://github.com/googleapis/python-spanner-sqlalchemy/issues/493 --- google/cloud/spanner_dbapi/connection.py | 2 +- tests/unit/spanner_dbapi/test_connection.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/google/cloud/spanner_dbapi/connection.py b/google/cloud/spanner_dbapi/connection.py index 65afcd4a2a..416bb2a959 100644 --- a/google/cloud/spanner_dbapi/connection.py +++ b/google/cloud/spanner_dbapi/connection.py @@ -239,7 +239,7 @@ def read_only(self, value): Args: value (bool): True for ReadOnly mode, False for ReadWrite. """ - if self._spanner_transaction_started: + if self._read_only != value and self._spanner_transaction_started: raise ValueError( "Connection read/write mode can't be changed while a transaction is in progress. " "Commit or rollback the current transaction and try again." diff --git a/tests/unit/spanner_dbapi/test_connection.py b/tests/unit/spanner_dbapi/test_connection.py index 62867bbd2e..a07e94735f 100644 --- a/tests/unit/spanner_dbapi/test_connection.py +++ b/tests/unit/spanner_dbapi/test_connection.py @@ -138,6 +138,10 @@ def test_read_only_connection(self): ): connection.read_only = False + # Verify that we can set the value to the same value as it already has. + connection.read_only = True + self.assertTrue(connection.read_only) + connection._spanner_transaction_started = False connection.read_only = False self.assertFalse(connection.read_only) From 829b799e0c9c6da274bf95c272cda564cfdba928 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Wed, 4 Dec 2024 15:20:51 +0100 Subject: [PATCH 06/13] feat: support float32 parameters in dbapi (#1245) * feat: support float32 parameters in dbapi dbapi should not add an explicit type code when a parameter of type float is encountered. Instead, it should rely on Spanner to infer the correct type. This way, both FLOAT32 and FLOAT64 can be used with the Python float type. Updates https://github.com/googleapis/python-spanner-sqlalchemy/issues/409 * chore: remove whitespaces --------- Co-authored-by: Sri Harsha CH <57220027+harshachinta@users.noreply.github.com> --- google/cloud/spanner_dbapi/parse_utils.py | 9 +++- tests/system/test_dbapi.py | 47 +++++++++++++++++++- tests/unit/spanner_dbapi/test_parse_utils.py | 3 +- 3 files changed, 55 insertions(+), 4 deletions(-) diff --git a/google/cloud/spanner_dbapi/parse_utils.py b/google/cloud/spanner_dbapi/parse_utils.py index 403550640e..f039efe5b0 100644 --- a/google/cloud/spanner_dbapi/parse_utils.py +++ b/google/cloud/spanner_dbapi/parse_utils.py @@ -29,12 +29,19 @@ from .types import DateStr, TimestampStr from .utils import sanitize_literals_for_upload +# Note: This mapping deliberately does not contain a value for float. +# The reason for that is that it is better to just let Spanner determine +# the parameter type instead of specifying one explicitly. The reason for +# this is that if the client specifies FLOAT64, and the actual column that +# the parameter is used for is of type FLOAT32, then Spanner will return an +# error. If however the client does not specify a type, then Spanner will +# automatically choose the appropriate type based on the column where the +# value will be inserted/updated or that it will be compared with. 
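+#
+# A hypothetical illustration of the effect (parameter and column names are
+# made up): for params = {"p1": 3.14}, get_param_types(params) no longer
+# returns an entry for "p1", so in a statement such as
+#   "SELECT * FROM t WHERE f32_col = @p1"
+# Spanner infers FLOAT32 (or FLOAT64) from the column that the parameter is
+# compared with.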
TYPES_MAP = { bool: spanner.param_types.BOOL, bytes: spanner.param_types.BYTES, str: spanner.param_types.STRING, int: spanner.param_types.INT64, - float: spanner.param_types.FLOAT64, datetime.datetime: spanner.param_types.TIMESTAMP, datetime.date: spanner.param_types.DATE, DateStr: spanner.param_types.DATE, diff --git a/tests/system/test_dbapi.py b/tests/system/test_dbapi.py index feb580d903..a98f100bcc 100644 --- a/tests/system/test_dbapi.py +++ b/tests/system/test_dbapi.py @@ -11,11 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +import base64 import datetime from collections import defaultdict + import pytest import time +import decimal from google.cloud import spanner_v1 from google.cloud._helpers import UTC @@ -50,7 +52,22 @@ SQL SECURITY INVOKER AS SELECT c.email - FROM contacts AS c;""" + FROM contacts AS c; + + CREATE TABLE all_types ( + id int64, + col_bool bool, + col_bytes bytes(max), + col_date date, + col_float32 float32, + col_float64 float64, + col_int64 int64, + col_json json, + col_numeric numeric, + col_string string(max), + coL_timestamp timestamp, + ) primary key (col_int64); + """ DDL_STATEMENTS = [stmt.strip() for stmt in DDL.split(";") if stmt.strip()] @@ -1602,3 +1619,29 @@ def test_list_tables(self, include_views): def test_invalid_statement_error(self): with pytest.raises(ProgrammingError): self._cursor.execute("-- comment only") + + def test_insert_all_types(self): + """Test inserting all supported data types""" + + self._conn.autocommit = True + self._cursor.execute( + """ + INSERT INTO all_types (id, col_bool, col_bytes, col_date, col_float32, col_float64, + col_int64, col_json, col_numeric, col_string, col_timestamp) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) + """, + ( + 1, + True, + base64.b64encode(b"test-bytes"), + datetime.date(2024, 12, 3), + 3.14, + 3.14, + 123, + JsonObject({"key": "value"}), + decimal.Decimal("3.14"), + "test-string", + datetime.datetime(2024, 12, 3, 17, 30, 14), + ), + ) + assert self._cursor.rowcount == 1 diff --git a/tests/unit/spanner_dbapi/test_parse_utils.py b/tests/unit/spanner_dbapi/test_parse_utils.py index 3a325014fa..4b1c7cdb06 100644 --- a/tests/unit/spanner_dbapi/test_parse_utils.py +++ b/tests/unit/spanner_dbapi/test_parse_utils.py @@ -218,6 +218,8 @@ def test_get_param_types(self): params = { "a1": 10, "b1": "string", + # Note: We only want a value and not a type for this. + # Instead, we let Spanner infer the correct type (FLOAT64 or FLOAT32) "c1": 10.39, "d1": TimestampStr("2005-08-30T01:01:01.000001Z"), "e1": DateStr("2019-12-05"), @@ -232,7 +234,6 @@ def test_get_param_types(self): want_types = { "a1": param_types.INT64, "b1": param_types.STRING, - "c1": param_types.FLOAT64, "d1": param_types.TIMESTAMP, "e1": param_types.DATE, "f1": param_types.BOOL, From 7df93ca9b11a5cd2fff448587d87d63e29d7a828 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Thu, 5 Dec 2024 11:17:53 +0100 Subject: [PATCH 07/13] test: add mock server tests (#1217) * test: add mock server tests * chore: move to testing folder + fix formatting * refactor: move mock server tests to separate directory * feat: add database admin service Adds a DatabaseAdminService to the mock server and sets up a basic test case for this. Also removes the generated stubs in the grpc files, as these are not needed. 
* test: add DDL test * test: add async client tests * chore: remove async + add transaction handling * chore: cleanup * chore: run code formatter --- .github/workflows/mock_server_tests.yaml | 21 + google/cloud/spanner_v1/database.py | 2 +- google/cloud/spanner_v1/testing/__init__.py | 0 .../spanner_v1/testing/mock_database_admin.py | 38 + .../cloud/spanner_v1/testing/mock_spanner.py | 216 +++ .../spanner_database_admin_pb2_grpc.py | 1267 +++++++++++++++++ .../spanner_v1/testing/spanner_pb2_grpc.py | 882 ++++++++++++ noxfile.py | 29 + tests/mockserver_tests/__init__.py | 0 tests/mockserver_tests/test_basics.py | 151 ++ 10 files changed, 2605 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/mock_server_tests.yaml create mode 100644 google/cloud/spanner_v1/testing/__init__.py create mode 100644 google/cloud/spanner_v1/testing/mock_database_admin.py create mode 100644 google/cloud/spanner_v1/testing/mock_spanner.py create mode 100644 google/cloud/spanner_v1/testing/spanner_database_admin_pb2_grpc.py create mode 100644 google/cloud/spanner_v1/testing/spanner_pb2_grpc.py create mode 100644 tests/mockserver_tests/__init__.py create mode 100644 tests/mockserver_tests/test_basics.py diff --git a/.github/workflows/mock_server_tests.yaml b/.github/workflows/mock_server_tests.yaml new file mode 100644 index 0000000000..2da5320071 --- /dev/null +++ b/.github/workflows/mock_server_tests.yaml @@ -0,0 +1,21 @@ +on: + push: + branches: + - main + pull_request: +name: Run Spanner tests against an in-mem mock server +jobs: + system-tests: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.12 + - name: Install nox + run: python -m pip install nox + - name: Run mock server tests + run: nox -s mockserver diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index abddd5d97d..1e10e1df73 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -142,7 +142,7 @@ class Database(object): statements in 'ddl_statements' above. """ - _spanner_api = None + _spanner_api: SpannerClient = None def __init__( self, diff --git a/google/cloud/spanner_v1/testing/__init__.py b/google/cloud/spanner_v1/testing/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/google/cloud/spanner_v1/testing/mock_database_admin.py b/google/cloud/spanner_v1/testing/mock_database_admin.py new file mode 100644 index 0000000000..a9b4eb6392 --- /dev/null +++ b/google/cloud/spanner_v1/testing/mock_database_admin.py @@ -0,0 +1,38 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from google.longrunning import operations_pb2 as operations_pb2 +from google.protobuf import empty_pb2 +import google.cloud.spanner_v1.testing.spanner_database_admin_pb2_grpc as database_admin_grpc + + +# An in-memory mock DatabaseAdmin server that can be used for testing. 
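+#
+# A usage sketch (relying only on names defined in this package): the
+# servicer records every request it receives, so a test that starts the
+# mock server via start_mock_server() in mock_spanner.py can apply a DDL
+# change and then assert on the captured UpdateDatabaseDdl request:
+#
+#   server, spanner_servicer, admin_servicer, port = start_mock_server()
+#   ... run UpdateDatabaseDdl against localhost:<port> ...
+#   assert len(admin_servicer.requests) == 1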
+class DatabaseAdminServicer(database_admin_grpc.DatabaseAdminServicer): + def __init__(self): + self._requests = [] + + @property + def requests(self): + return self._requests + + def clear_requests(self): + self._requests = [] + + def UpdateDatabaseDdl(self, request, context): + self._requests.append(request) + operation = operations_pb2.Operation() + operation.done = True + operation.name = "projects/test-project/operations/test-operation" + operation.response.Pack(empty_pb2.Empty()) + return operation diff --git a/google/cloud/spanner_v1/testing/mock_spanner.py b/google/cloud/spanner_v1/testing/mock_spanner.py new file mode 100644 index 0000000000..d01c63aff5 --- /dev/null +++ b/google/cloud/spanner_v1/testing/mock_spanner.py @@ -0,0 +1,216 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import base64 +import grpc +from concurrent import futures + +from google.protobuf import empty_pb2 +from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer +import google.cloud.spanner_v1.testing.spanner_database_admin_pb2_grpc as database_admin_grpc +import google.cloud.spanner_v1.testing.spanner_pb2_grpc as spanner_grpc +import google.cloud.spanner_v1.types.commit_response as commit +import google.cloud.spanner_v1.types.result_set as result_set +import google.cloud.spanner_v1.types.spanner as spanner +import google.cloud.spanner_v1.types.transaction as transaction + + +class MockSpanner: + def __init__(self): + self.results = {} + + def add_result(self, sql: str, result: result_set.ResultSet): + self.results[sql.lower().strip()] = result + + def get_result(self, sql: str) -> result_set.ResultSet: + result = self.results.get(sql.lower().strip()) + if result is None: + raise ValueError(f"No result found for {sql}") + return result + + def get_result_as_partial_result_sets( + self, sql: str + ) -> [result_set.PartialResultSet]: + result: result_set.ResultSet = self.get_result(sql) + partials = [] + first = True + if len(result.rows) == 0: + partial = result_set.PartialResultSet() + partial.metadata = result.metadata + partials.append(partial) + else: + for row in result.rows: + partial = result_set.PartialResultSet() + if first: + partial.metadata = result.metadata + partial.values.extend(row) + partials.append(partial) + partials[len(partials) - 1].stats = result.stats + return partials + + +# An in-memory mock Spanner server that can be used for testing. 
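+#
+# A usage sketch (the SQL text and result are illustrative): results must be
+# registered before a query runs; ExecuteStreamingSql then replays the
+# registered ResultSet as a stream of PartialResultSets:
+#
+#   servicer = SpannerServicer()
+#   servicer.mock_spanner.add_result("SELECT 1", result_set.ResultSet())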
+class SpannerServicer(spanner_grpc.SpannerServicer): + def __init__(self): + self._requests = [] + self.session_counter = 0 + self.sessions = {} + self.transaction_counter = 0 + self.transactions = {} + self._mock_spanner = MockSpanner() + + @property + def mock_spanner(self): + return self._mock_spanner + + @property + def requests(self): + return self._requests + + def clear_requests(self): + self._requests = [] + + def CreateSession(self, request, context): + self._requests.append(request) + return self.__create_session(request.database, request.session) + + def BatchCreateSessions(self, request, context): + self._requests.append(request) + sessions = [] + for i in range(request.session_count): + sessions.append( + self.__create_session(request.database, request.session_template) + ) + return spanner.BatchCreateSessionsResponse(dict(session=sessions)) + + def __create_session(self, database: str, session_template: spanner.Session): + self.session_counter += 1 + session = spanner.Session() + session.name = database + "/sessions/" + str(self.session_counter) + session.multiplexed = session_template.multiplexed + session.labels.MergeFrom(session_template.labels) + session.creator_role = session_template.creator_role + self.sessions[session.name] = session + return session + + def GetSession(self, request, context): + self._requests.append(request) + return spanner.Session() + + def ListSessions(self, request, context): + self._requests.append(request) + return [spanner.Session()] + + def DeleteSession(self, request, context): + self._requests.append(request) + return empty_pb2.Empty() + + def ExecuteSql(self, request, context): + self._requests.append(request) + return result_set.ResultSet() + + def ExecuteStreamingSql(self, request, context): + self._requests.append(request) + partials = self.mock_spanner.get_result_as_partial_result_sets(request.sql) + for result in partials: + yield result + + def ExecuteBatchDml(self, request, context): + self._requests.append(request) + response = spanner.ExecuteBatchDmlResponse() + started_transaction = None + if not request.transaction.begin == transaction.TransactionOptions(): + started_transaction = self.__create_transaction( + request.session, request.transaction.begin + ) + first = True + for statement in request.statements: + result = self.mock_spanner.get_result(statement.sql) + if first and started_transaction is not None: + result = result_set.ResultSet( + self.mock_spanner.get_result(statement.sql) + ) + result.metadata = result_set.ResultSetMetadata(result.metadata) + result.metadata.transaction = started_transaction + response.result_sets.append(result) + return response + + def Read(self, request, context): + self._requests.append(request) + return result_set.ResultSet() + + def StreamingRead(self, request, context): + self._requests.append(request) + for result in [result_set.PartialResultSet(), result_set.PartialResultSet()]: + yield result + + def BeginTransaction(self, request, context): + self._requests.append(request) + return self.__create_transaction(request.session, request.options) + + def __create_transaction( + self, session: str, options: transaction.TransactionOptions + ) -> transaction.Transaction: + session = self.sessions[session] + if session is None: + raise ValueError(f"Session not found: {session}") + self.transaction_counter += 1 + id_bytes = bytes( + f"{session.name}/transactions/{self.transaction_counter}", "UTF-8" + ) + transaction_id = base64.urlsafe_b64encode(id_bytes) + self.transactions[transaction_id] = 
options + return transaction.Transaction(dict(id=transaction_id)) + + def Commit(self, request, context): + self._requests.append(request) + tx = self.transactions[request.transaction_id] + if tx is None: + raise ValueError(f"Transaction not found: {request.transaction_id}") + del self.transactions[request.transaction_id] + return commit.CommitResponse() + + def Rollback(self, request, context): + self._requests.append(request) + return empty_pb2.Empty() + + def PartitionQuery(self, request, context): + self._requests.append(request) + return spanner.PartitionResponse() + + def PartitionRead(self, request, context): + self._requests.append(request) + return spanner.PartitionResponse() + + def BatchWrite(self, request, context): + self._requests.append(request) + for result in [spanner.BatchWriteResponse(), spanner.BatchWriteResponse()]: + yield result + + +def start_mock_server() -> (grpc.Server, SpannerServicer, DatabaseAdminServicer, int): + # Create a gRPC server. + spanner_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + + # Add the Spanner services to the gRPC server. + spanner_servicer = SpannerServicer() + spanner_grpc.add_SpannerServicer_to_server(spanner_servicer, spanner_server) + database_admin_servicer = DatabaseAdminServicer() + database_admin_grpc.add_DatabaseAdminServicer_to_server( + database_admin_servicer, spanner_server + ) + + # Start the server on a random port. + port = spanner_server.add_insecure_port("[::]:0") + spanner_server.start() + return spanner_server, spanner_servicer, database_admin_servicer, port diff --git a/google/cloud/spanner_v1/testing/spanner_database_admin_pb2_grpc.py b/google/cloud/spanner_v1/testing/spanner_database_admin_pb2_grpc.py new file mode 100644 index 0000000000..fdc26b30ad --- /dev/null +++ b/google/cloud/spanner_v1/testing/spanner_database_admin_pb2_grpc.py @@ -0,0 +1,1267 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! + + +# Generated with the following commands: +# +# pip install grpcio-tools +# git clone git@github.com:googleapis/googleapis.git +# cd googleapis +# python -m grpc_tools.protoc \ +# -I . \ +# --python_out=. --pyi_out=. --grpc_python_out=. 
\ +# ./google/spanner/admin/database/v1/*.proto + +"""Client and server classes corresponding to protobuf-defined services.""" + +import grpc +from google.iam.v1 import iam_policy_pb2 as google_dot_iam_dot_v1_dot_iam__policy__pb2 +from google.iam.v1 import policy_pb2 as google_dot_iam_dot_v1_dot_policy__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.cloud.spanner_admin_database_v1.types import ( + backup as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2, +) +from google.cloud.spanner_admin_database_v1.types import ( + backup_schedule as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2, +) +from google.cloud.spanner_admin_database_v1.types import ( + spanner_database_admin as google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2, +) + +GRPC_GENERATED_VERSION = "1.67.0" +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + + _version_not_supported = first_version_is_lower( + GRPC_VERSION, GRPC_GENERATED_VERSION + ) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f"The grpc package installed is at version {GRPC_VERSION}," + + " but the generated code in google/spanner/admin/database/v1/spanner_database_admin_pb2_grpc.py depends on" + + f" grpcio>={GRPC_GENERATED_VERSION}." + + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}" + + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}." + ) + + +class DatabaseAdminServicer(object): + """Cloud Spanner Database Admin API + + The Cloud Spanner Database Admin API can be used to: + * create, drop, and list databases + * update the schema of pre-existing databases + * create, delete, copy and list backups for a database + * restore a database from an existing backup + """ + + def ListDatabases(self, request, context): + """Lists Cloud Spanner databases.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateDatabase(self, request, context): + """Creates a new Cloud Spanner database and starts to prepare it for serving. + The returned [long-running operation][google.longrunning.Operation] will + have a name of the format `/operations/` and + can be used to track preparation of the database. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Database][google.spanner.admin.database.v1.Database], if successful. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetDatabase(self, request, context): + """Gets the state of a Cloud Spanner database.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateDatabase(self, request, context): + """Updates a Cloud Spanner database. The returned + [long-running operation][google.longrunning.Operation] can be used to track + the progress of updating the database. 
If the named database does not + exist, returns `NOT_FOUND`. + + While the operation is pending: + + * The database's + [reconciling][google.spanner.admin.database.v1.Database.reconciling] + field is set to true. + * Cancelling the operation is best-effort. If the cancellation succeeds, + the operation metadata's + [cancel_time][google.spanner.admin.database.v1.UpdateDatabaseMetadata.cancel_time] + is set, the updates are reverted, and the operation terminates with a + `CANCELLED` status. + * New UpdateDatabase requests will return a `FAILED_PRECONDITION` error + until the pending operation is done (returns successfully or with + error). + * Reading the database via the API continues to give the pre-request + values. + + Upon completion of the returned operation: + + * The new values are in effect and readable via the API. + * The database's + [reconciling][google.spanner.admin.database.v1.Database.reconciling] + field becomes false. + + The returned [long-running operation][google.longrunning.Operation] will + have a name of the format + `projects//instances//databases//operations/` + and can be used to track the database modification. The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseMetadata][google.spanner.admin.database.v1.UpdateDatabaseMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Database][google.spanner.admin.database.v1.Database], if successful. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateDatabaseDdl(self, request, context): + """Updates the schema of a Cloud Spanner database by + creating/altering/dropping tables, columns, indexes, etc. The returned + [long-running operation][google.longrunning.Operation] will have a name of + the format `/operations/` and can be used to + track execution of the schema change(s). The + [metadata][google.longrunning.Operation.metadata] field type is + [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. + The operation has no response. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DropDatabase(self, request, context): + """Drops (aka deletes) a Cloud Spanner database. + Completed backups for the database will be retained according to their + `expire_time`. + Note: Cloud Spanner might continue to accept requests for a few seconds + after the database has been deleted. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetDatabaseDdl(self, request, context): + """Returns the schema of a Cloud Spanner database as a list of formatted + DDL statements. This method does not show pending schema updates, those may + be queried using the [Operations][google.longrunning.Operations] API. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def SetIamPolicy(self, request, context): + """Sets the access control policy on a database or backup resource. + Replaces any existing policy. + + Authorization requires `spanner.databases.setIamPolicy` + permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. 
+ For backups, authorization requires `spanner.backups.setIamPolicy` + permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetIamPolicy(self, request, context): + """Gets the access control policy for a database or backup resource. + Returns an empty policy if a database or backup exists but does not have a + policy set. + + Authorization requires `spanner.databases.getIamPolicy` permission on + [resource][google.iam.v1.GetIamPolicyRequest.resource]. + For backups, authorization requires `spanner.backups.getIamPolicy` + permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def TestIamPermissions(self, request, context): + """Returns permissions that the caller has on the specified database or backup + resource. + + Attempting this RPC on a non-existent Cloud Spanner database will + result in a NOT_FOUND error if the user has + `spanner.databases.list` permission on the containing Cloud + Spanner instance. Otherwise returns an empty set of permissions. + Calling this method on a backup that does not exist will + result in a NOT_FOUND error if the user has + `spanner.backups.list` permission on the containing instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateBackup(self, request, context): + """Starts creating a new Cloud Spanner Backup. + The returned backup [long-running operation][google.longrunning.Operation] + will have a name of the format + `projects//instances//backups//operations/` + and can be used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Backup][google.spanner.admin.database.v1.Backup], if successful. + Cancelling the returned operation will stop the creation and delete the + backup. There can be only one pending backup creation per database. Backup + creation of different databases can run concurrently. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CopyBackup(self, request, context): + """Starts copying a Cloud Spanner Backup. + The returned backup [long-running operation][google.longrunning.Operation] + will have a name of the format + `projects//instances//backups//operations/` + and can be used to track copying of the backup. The operation is associated + with the destination backup. + The [metadata][google.longrunning.Operation.metadata] field type is + [CopyBackupMetadata][google.spanner.admin.database.v1.CopyBackupMetadata]. + The [response][google.longrunning.Operation.response] field type is + [Backup][google.spanner.admin.database.v1.Backup], if successful. + Cancelling the returned operation will stop the copying and delete the + destination backup. Concurrent CopyBackup requests can run on the same + source backup. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetBackup(self, request, context): + """Gets metadata on a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateBackup(self, request, context): + """Updates a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteBackup(self, request, context): + """Deletes a pending or completed + [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackups(self, request, context): + """Lists completed and pending backups. + Backups returned are ordered by `create_time` in descending order, + starting from the most recent `create_time`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def RestoreDatabase(self, request, context): + """Create a new database by restoring from a completed backup. The new + database must be in the same project and in an instance with the same + instance configuration as the instance containing + the backup. The returned database [long-running + operation][google.longrunning.Operation] has a name of the format + `projects//instances//databases//operations/`, + and can be used to track the progress of the operation, and to cancel it. + The [metadata][google.longrunning.Operation.metadata] field type is + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type + is [Database][google.spanner.admin.database.v1.Database], if + successful. Cancelling the returned operation will stop the restore and + delete the database. + There can be only one database being restored into an instance at a time. + Once the restore operation completes, a new restore operation can be + initiated, without waiting for the optimize operation associated with the + first restore to complete. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListDatabaseOperations(self, request, context): + """Lists database [longrunning-operations][google.longrunning.Operation]. + A database operation has a name of the form + `projects//instances//databases//operations/`. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + `metadata.type_url` describes the type of the metadata. Operations returned + include those that have completed/failed/canceled within the last 7 days, + and pending operations. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackupOperations(self, request, context): + """Lists the backup [long-running operations][google.longrunning.Operation] in + the given instance. 
A backup operation has a name of the form + `projects//instances//backups//operations/`. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + `metadata.type_url` describes the type of the metadata. Operations returned + include those that have completed/failed/canceled within the last 7 days, + and pending operations. Operations returned are ordered by + `operation.metadata.value.progress.start_time` in descending order starting + from the most recently started operation. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListDatabaseRoles(self, request, context): + """Lists Cloud Spanner database roles.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateBackupSchedule(self, request, context): + """Creates a new backup schedule.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetBackupSchedule(self, request, context): + """Gets backup schedule for the input schedule name.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateBackupSchedule(self, request, context): + """Updates a backup schedule.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteBackupSchedule(self, request, context): + """Deletes a backup schedule.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackupSchedules(self, request, context): + """Lists all the backup schedules for the database.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + +def add_DatabaseAdminServicer_to_server(servicer, server): + rpc_method_handlers = { + "ListDatabases": grpc.unary_unary_rpc_method_handler( + servicer.ListDatabases, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesResponse.serialize, + ), + "CreateDatabase": grpc.unary_unary_rpc_method_handler( + servicer.CreateDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.CreateDatabaseRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetDatabase": grpc.unary_unary_rpc_method_handler( + servicer.GetDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.Database.serialize, + ), + "UpdateDatabase": grpc.unary_unary_rpc_method_handler( + servicer.UpdateDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseRequest.deserialize, + 
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "UpdateDatabaseDdl": grpc.unary_unary_rpc_method_handler( + servicer.UpdateDatabaseDdl, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "DropDatabase": grpc.unary_unary_rpc_method_handler( + servicer.DropDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.DropDatabaseRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "GetDatabaseDdl": grpc.unary_unary_rpc_method_handler( + servicer.GetDatabaseDdl, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.serialize, + ), + "SetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.SetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "GetIamPolicy": grpc.unary_unary_rpc_method_handler( + servicer.GetIamPolicy, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_policy__pb2.Policy.SerializeToString, + ), + "TestIamPermissions": grpc.unary_unary_rpc_method_handler( + servicer.TestIamPermissions, + request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, + response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, + ), + "CreateBackup": grpc.unary_unary_rpc_method_handler( + servicer.CreateBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CreateBackupRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "CopyBackup": grpc.unary_unary_rpc_method_handler( + servicer.CopyBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CopyBackupRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetBackup": grpc.unary_unary_rpc_method_handler( + servicer.GetBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.GetBackupRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.serialize, + ), + "UpdateBackup": grpc.unary_unary_rpc_method_handler( + servicer.UpdateBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.UpdateBackupRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.serialize, + ), + "DeleteBackup": grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackup, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.DeleteBackupRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ListBackups": grpc.unary_unary_rpc_method_handler( + servicer.ListBackups, + 
request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsResponse.serialize, + ), + "RestoreDatabase": grpc.unary_unary_rpc_method_handler( + servicer.RestoreDatabase, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.deserialize, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "ListDatabaseOperations": grpc.unary_unary_rpc_method_handler( + servicer.ListDatabaseOperations, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.serialize, + ), + "ListBackupOperations": grpc.unary_unary_rpc_method_handler( + servicer.ListBackupOperations, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsResponse.serialize, + ), + "ListDatabaseRoles": grpc.unary_unary_rpc_method_handler( + servicer.ListDatabaseRoles, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesResponse.serialize, + ), + "CreateBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.CreateBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.CreateBackupScheduleRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize, + ), + "GetBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.GetBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.GetBackupScheduleRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize, + ), + "UpdateBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.UpdateBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.UpdateBackupScheduleRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.serialize, + ), + "DeleteBackupSchedule": grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackupSchedule, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.DeleteBackupScheduleRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ListBackupSchedules": grpc.unary_unary_rpc_method_handler( + servicer.ListBackupSchedules, + request_deserializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesRequest.deserialize, + response_serializer=google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesResponse.serialize, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 
"google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers( + "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers + ) + + +# This class is part of an EXPERIMENTAL API. +class DatabaseAdmin(object): + """Cloud Spanner Database Admin API + + The Cloud Spanner Database Admin API can be used to: + * create, drop, and list databases + * update the schema of pre-existing databases + * create, delete, copy and list backups for a database + * restore a database from an existing backup + """ + + @staticmethod + def ListDatabases( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabases", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabasesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CreateDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.CreateDatabaseRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.Database.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def UpdateDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def UpdateDatabaseDdl( + 
request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateDatabaseDdl", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.UpdateDatabaseDdlRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DropDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/DropDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.DropDatabaseRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetDatabaseDdl( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetDatabaseDdl", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.GetDatabaseDdlResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def SetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/SetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.SetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetIamPolicy( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetIamPolicy", + google_dot_iam_dot_v1_dot_iam__policy__pb2.GetIamPolicyRequest.SerializeToString, + google_dot_iam_dot_v1_dot_policy__pb2.Policy.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def TestIamPermissions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + 
compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/TestIamPermissions", + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, + google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CreateBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CreateBackupRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CopyBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CopyBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.CopyBackupRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.GetBackupRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def UpdateBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.UpdateBackupRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.Backup.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DeleteBackup( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + 
target, + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.DeleteBackupRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListBackups( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def RestoreDatabase( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.SerializeToString, + google_dot_longrunning_dot_operations__pb2.Operation.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListDatabaseOperations( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListBackupOperations( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__pb2.ListBackupOperationsResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListDatabaseRoles( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, 
+ ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseRoles", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_spanner__database__admin__pb2.ListDatabaseRolesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def CreateBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.CreateBackupScheduleRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.GetBackupScheduleRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def UpdateBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.UpdateBackupScheduleRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.BackupSchedule.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DeleteBackupSchedule( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackupSchedule", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.DeleteBackupScheduleRequest.SerializeToString, + google_dot_protobuf_dot_empty__pb2.Empty.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListBackupSchedules( + request, + target, + 
options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupSchedules", + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesRequest.SerializeToString, + google_dot_spanner_dot_admin_dot_database_dot_v1_dot_backup__schedule__pb2.ListBackupSchedulesResponse.FromString, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) diff --git a/google/cloud/spanner_v1/testing/spanner_pb2_grpc.py b/google/cloud/spanner_v1/testing/spanner_pb2_grpc.py new file mode 100644 index 0000000000..c4622a6a34 --- /dev/null +++ b/google/cloud/spanner_v1/testing/spanner_pb2_grpc.py @@ -0,0 +1,882 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! + +# Generated with the following commands: +# +# pip install grpcio-tools +# git clone git@github.com:googleapis/googleapis.git +# cd googleapis +# python -m grpc_tools.protoc \ +# -I . \ +# --python_out=. --pyi_out=. --grpc_python_out=. \ +# ./google/spanner/v1/*.proto + +"""Client and server classes corresponding to protobuf-defined services.""" + +import grpc +from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 +from google.cloud.spanner_v1.types import ( + commit_response as google_dot_spanner_dot_v1_dot_commit__response__pb2, +) +from google.cloud.spanner_v1.types import ( + result_set as google_dot_spanner_dot_v1_dot_result__set__pb2, +) +from google.cloud.spanner_v1.types import ( + spanner as google_dot_spanner_dot_v1_dot_spanner__pb2, +) +from google.cloud.spanner_v1.types import ( + transaction as google_dot_spanner_dot_v1_dot_transaction__pb2, +) + +GRPC_GENERATED_VERSION = "1.67.0" +GRPC_VERSION = grpc.__version__ +_version_not_supported = False + +try: + from grpc._utilities import first_version_is_lower + + _version_not_supported = first_version_is_lower( + GRPC_VERSION, GRPC_GENERATED_VERSION + ) +except ImportError: + _version_not_supported = True + +if _version_not_supported: + raise RuntimeError( + f"The grpc package installed is at version {GRPC_VERSION}," + + " but the generated code in google/spanner/v1/spanner_pb2_grpc.py depends on" + + f" grpcio>={GRPC_GENERATED_VERSION}." + + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}" + + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}." + ) + + +class SpannerServicer(object): + """Cloud Spanner API + + The Cloud Spanner API can be used to manage sessions and execute + transactions on data stored in Cloud Spanner databases. + """ + + def CreateSession(self, request, context): + """Creates a new session. A session can be used to perform + transactions that read and/or modify data in a Cloud Spanner database. + Sessions are meant to be reused for many consecutive + transactions. + + Sessions can only execute one transaction at a time. To execute + multiple concurrent read-write/write-only transactions, create + multiple sessions. Note that standalone reads and queries use a + transaction internally, and count toward the one transaction + limit. + + Active sessions use additional server resources, so it is a good idea to + delete idle and unneeded sessions. 
+ Aside from explicit deletes, Cloud Spanner may delete sessions for which no + operations are sent for more than an hour. If a session is deleted, + requests to it return `NOT_FOUND`. + + Idle sessions can be kept alive by sending a trivial SQL query + periodically, e.g., `"SELECT 1"`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def BatchCreateSessions(self, request, context): + """Creates multiple new sessions. + + This API can be used to initialize a session cache on the clients. + See https://goo.gl/TgSFN2 for best practices on session cache management. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetSession(self, request, context): + """Gets a session. Returns `NOT_FOUND` if the session does not exist. + This is mainly useful for determining whether a session is still + alive. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListSessions(self, request, context): + """Lists all sessions in a given database.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteSession(self, request, context): + """Ends a session, releasing server resources associated with it. This will + asynchronously trigger cancellation of any operations that are running with + this session. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ExecuteSql(self, request, context): + """Executes an SQL statement, returning all results in a single reply. This + method cannot be used to return a result set larger than 10 MiB; + if the query yields more data than that, the query fails with + a `FAILED_PRECONDITION` error. + + Operations inside read-write transactions might return `ABORTED`. If + this occurs, the application should restart the transaction from + the beginning. See [Transaction][google.spanner.v1.Transaction] for more + details. + + Larger result sets can be fetched in streaming fashion by calling + [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] + instead. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ExecuteStreamingSql(self, request, context): + """Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the + result set as a stream. Unlike + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on + the size of the returned result set. However, no individual row in the + result set can exceed 100 MiB, and no column value can exceed 10 MiB. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ExecuteBatchDml(self, request, context): + """Executes a batch of SQL DML statements. This method allows many statements + to be run with lower latency than submitting them sequentially with + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. + + Statements are executed in sequential order. A request can succeed even if + a statement fails. 
The + [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] + field in the response provides information about the statement that failed. + Clients must inspect this field to determine whether an error occurred. + + Execution stops after the first failed statement; the remaining statements + are not executed. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def Read(self, request, context): + """Reads rows from the database using key lookups and scans, as a + simple key/value style alternative to + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be + used to return a result set larger than 10 MiB; if the read matches more + data than that, the read fails with a `FAILED_PRECONDITION` + error. + + Reads inside read-write transactions might return `ABORTED`. If + this occurs, the application should restart the transaction from + the beginning. See [Transaction][google.spanner.v1.Transaction] for more + details. + + Larger result sets can be yielded in streaming fashion by calling + [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def StreamingRead(self, request, context): + """Like [Read][google.spanner.v1.Spanner.Read], except returns the result set + as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no + limit on the size of the returned result set. However, no individual row in + the result set can exceed 100 MiB, and no column value can exceed + 10 MiB. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def BeginTransaction(self, request, context): + """Begins a new transaction. This step can often be skipped: + [Read][google.spanner.v1.Spanner.Read], + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a + side-effect. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def Commit(self, request, context): + """Commits a transaction. The request includes the mutations to be + applied to rows in the database. + + `Commit` might return an `ABORTED` error. This can occur at any time; + commonly, the cause is conflicts with concurrent + transactions. However, it can also happen for a variety of other + reasons. If `Commit` returns `ABORTED`, the caller should re-attempt + the transaction from the beginning, re-using the same session. + + On very rare occasions, `Commit` might return `UNKNOWN`. This can happen, + for example, if the client job experiences a 1+ hour networking failure. + At that point, Cloud Spanner has lost track of the transaction outcome and + we recommend that you perform another read from the database to see the + state of things as they are now. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def Rollback(self, request, context): + """Rolls back a transaction, releasing any locks it holds. 
It is a good + idea to call this for any transaction that includes one or more + [Read][google.spanner.v1.Spanner.Read] or + [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately + decides not to commit. + + `Rollback` returns `OK` if it successfully aborts the transaction, the + transaction was already aborted, or the transaction is not + found. `Rollback` never returns `ABORTED`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def PartitionQuery(self, request, context): + """Creates a set of partition tokens that can be used to execute a query + operation in parallel. Each of the returned partition tokens can be used + by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to + specify a subset of the query result to read. The same session and + read-only transaction must be used by the PartitionQueryRequest used to + create the partition tokens and the ExecuteSqlRequests that use the + partition tokens. + + Partition tokens become invalid when the session used to create them + is deleted, is idle for too long, begins a new transaction, or becomes too + old. When any of these happen, it is not possible to resume the query, and + the whole operation must be restarted from the beginning. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def PartitionRead(self, request, context): + """Creates a set of partition tokens that can be used to execute a read + operation in parallel. Each of the returned partition tokens can be used + by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a + subset of the read result to read. The same session and read-only + transaction must be used by the PartitionReadRequest used to create the + partition tokens and the ReadRequests that use the partition tokens. There + are no ordering guarantees on rows returned among the returned partition + tokens, or even within each individual StreamingRead call issued with a + partition_token. + + Partition tokens become invalid when the session used to create them + is deleted, is idle for too long, begins a new transaction, or becomes too + old. When any of these happen, it is not possible to resume the read, and + the whole operation must be restarted from the beginning. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def BatchWrite(self, request, context): + """Batches the supplied mutation groups in a collection of efficient + transactions. All mutations in a group are committed atomically. However, + mutations across groups can be committed non-atomically in an unspecified + order and thus, they must be independent of each other. Partial failure is + possible, i.e., some groups may have been committed successfully, while + some may have failed. The results of individual batches are streamed into + the response as the batches are applied. + + BatchWrite requests are not replay protected, meaning that each mutation + group may be applied more than once. Replays of non-idempotent mutations + may have undesirable effects. 
For example, replays of an insert mutation + may produce an already exists error or if you use generated or commit + timestamp-based keys, it may result in additional rows being added to the + mutation's table. We recommend structuring your mutation groups to be + idempotent to avoid this issue. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + +def add_SpannerServicer_to_server(servicer, server): + rpc_method_handlers = { + "CreateSession": grpc.unary_unary_rpc_method_handler( + servicer.CreateSession, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.CreateSessionRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.Session.serialize, + ), + "BatchCreateSessions": grpc.unary_unary_rpc_method_handler( + servicer.BatchCreateSessions, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsResponse.serialize, + ), + "GetSession": grpc.unary_unary_rpc_method_handler( + servicer.GetSession, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.GetSessionRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.Session.serialize, + ), + "ListSessions": grpc.unary_unary_rpc_method_handler( + servicer.ListSessions, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsResponse.serialize, + ), + "DeleteSession": grpc.unary_unary_rpc_method_handler( + servicer.DeleteSession, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.DeleteSessionRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ExecuteSql": grpc.unary_unary_rpc_method_handler( + servicer.ExecuteSql, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.serialize, + ), + "ExecuteStreamingSql": grpc.unary_stream_rpc_method_handler( + servicer.ExecuteStreamingSql, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.serialize, + ), + "ExecuteBatchDml": grpc.unary_unary_rpc_method_handler( + servicer.ExecuteBatchDml, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlResponse.serialize, + ), + "Read": grpc.unary_unary_rpc_method_handler( + servicer.Read, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.serialize, + ), + "StreamingRead": grpc.unary_stream_rpc_method_handler( + servicer.StreamingRead, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.serialize, + ), + "BeginTransaction": grpc.unary_unary_rpc_method_handler( + servicer.BeginTransaction, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BeginTransactionRequest.deserialize, + 
response_serializer=google_dot_spanner_dot_v1_dot_transaction__pb2.Transaction.serialize, + ), + "Commit": grpc.unary_unary_rpc_method_handler( + servicer.Commit, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.CommitRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_commit__response__pb2.CommitResponse.serialize, + ), + "Rollback": grpc.unary_unary_rpc_method_handler( + servicer.Rollback, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.RollbackRequest.deserialize, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "PartitionQuery": grpc.unary_unary_rpc_method_handler( + servicer.PartitionQuery, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionQueryRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.serialize, + ), + "PartitionRead": grpc.unary_unary_rpc_method_handler( + servicer.PartitionRead, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionReadRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.serialize, + ), + "BatchWrite": grpc.unary_stream_rpc_method_handler( + servicer.BatchWrite, + request_deserializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteRequest.deserialize, + response_serializer=google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteResponse.serialize, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + "google.spanner.v1.Spanner", rpc_method_handlers + ) + server.add_generic_rpc_handlers((generic_handler,)) + server.add_registered_method_handlers( + "google.spanner.v1.Spanner", rpc_method_handlers + ) + + +# This class is part of an EXPERIMENTAL API. +class Spanner(object): + """Cloud Spanner API + + The Cloud Spanner API can be used to manage sessions and execute + transactions on data stored in Cloud Spanner databases. 
+ """ + + @staticmethod + def CreateSession( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/CreateSession", + google_dot_spanner_dot_v1_dot_spanner__pb2.CreateSessionRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.Session.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def BatchCreateSessions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/BatchCreateSessions", + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchCreateSessionsResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def GetSession( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/GetSession", + google_dot_spanner_dot_v1_dot_spanner__pb2.GetSessionRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.Session.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ListSessions( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/ListSessions", + google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.ListSessionsResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def DeleteSession( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/DeleteSession", + google_dot_spanner_dot_v1_dot_spanner__pb2.DeleteSessionRequest.to_json, + google_dot_protobuf_dot_empty__pb2.Empty.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ExecuteSql( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/ExecuteSql", + 
google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ExecuteStreamingSql( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.spanner.v1.Spanner/ExecuteStreamingSql", + google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteSqlRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def ExecuteBatchDml( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/ExecuteBatchDml", + google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.ExecuteBatchDmlResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def Read( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/Read", + google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.ResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def StreamingRead( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.spanner.v1.Spanner/StreamingRead", + google_dot_spanner_dot_v1_dot_spanner__pb2.ReadRequest.to_json, + google_dot_spanner_dot_v1_dot_result__set__pb2.PartialResultSet.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def BeginTransaction( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/BeginTransaction", + google_dot_spanner_dot_v1_dot_spanner__pb2.BeginTransactionRequest.to_json, + google_dot_spanner_dot_v1_dot_transaction__pb2.Transaction.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def Commit( + request, + target, + options=(), + 
channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/Commit", + google_dot_spanner_dot_v1_dot_spanner__pb2.CommitRequest.to_json, + google_dot_spanner_dot_v1_dot_commit__response__pb2.CommitResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def Rollback( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/Rollback", + google_dot_spanner_dot_v1_dot_spanner__pb2.RollbackRequest.to_json, + google_dot_protobuf_dot_empty__pb2.Empty.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def PartitionQuery( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/PartitionQuery", + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionQueryRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def PartitionRead( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_unary( + request, + target, + "/google.spanner.v1.Spanner/PartitionRead", + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionReadRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.PartitionResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) + + @staticmethod + def BatchWrite( + request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None, + ): + return grpc.experimental.unary_stream( + request, + target, + "/google.spanner.v1.Spanner/BatchWrite", + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteRequest.to_json, + google_dot_spanner_dot_v1_dot_spanner__pb2.BatchWriteResponse.from_json, + options, + channel_credentials, + insecure, + call_credentials, + compression, + wait_for_ready, + timeout, + metadata, + _registered_method=True, + ) diff --git a/noxfile.py b/noxfile.py index f5a2761d73..905df735bc 100644 --- a/noxfile.py +++ b/noxfile.py @@ -33,6 +33,7 @@ LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" +DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12" UNIT_TEST_PYTHON_VERSIONS: List[str] = [ "3.7", @@ -234,6 +235,34 @@ def unit(session, protobuf_implementation): ) +@nox.session(python=DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION) +def mockserver(session): + # Install all test 
dependencies, then install this package in-place. + + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + # install_unittest_dependencies(session, "-c", constraints_path) + standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES + session.install(*standard_deps, "-c", constraints_path) + session.install("-e", ".", "-c", constraints_path) + + # Run py.test against the mockserver tests. + session.run( + "py.test", + "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", + "--cov=google", + "--cov=tests/unit", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + os.path.join("tests", "mockserver_tests"), + *session.posargs, + ) + + def install_systemtest_dependencies(session, *constraints): # Use pre-release gRPC for system tests. # Exclude version 1.52.0rc1 which has a known issue. diff --git a/tests/mockserver_tests/__init__.py b/tests/mockserver_tests/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/mockserver_tests/test_basics.py b/tests/mockserver_tests/test_basics.py new file mode 100644 index 0000000000..f2dab9af06 --- /dev/null +++ b/tests/mockserver_tests/test_basics.py @@ -0,0 +1,151 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
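This new test file exercises the client against an in-process gRPC mock server. Distilled to its essentials, the wiring it sets up looks roughly like the following sketch (names are taken from this patch; "test-project" is only a placeholder project id):

    from google.api_core.client_options import ClientOptions
    from google.auth.credentials import AnonymousCredentials
    from google.cloud.spanner_v1 import Client
    from google.cloud.spanner_v1.testing.mock_spanner import start_mock_server

    # Start the in-process server; the servicers are returned so that tests
    # can register canned results and inspect the requests they received.
    server, spanner_service, database_admin_service, port = start_mock_server()

    # Point an ordinary Spanner client at the mock server instead of the
    # production endpoint; AnonymousCredentials avoids real authentication.
    client = Client(
        project="test-project",
        credentials=AnonymousCredentials(),
        client_options=ClientOptions(api_endpoint="localhost:" + str(port)),
    )

Because the client itself is unmodified, requests still flow through the real session pool and serialization code paths, which is what makes assertions on the sequence of received requests meaningful.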
+ +import unittest + +from google.cloud.spanner_admin_database_v1.types import spanner_database_admin +from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer +from google.cloud.spanner_v1.testing.mock_spanner import ( + start_mock_server, + SpannerServicer, +) +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set +from google.api_core.client_options import ClientOptions +from google.auth.credentials import AnonymousCredentials +from google.cloud.spanner_v1 import ( + Client, + FixedSizePool, + BatchCreateSessionsRequest, + ExecuteSqlRequest, + GetSessionRequest, +) +from google.cloud.spanner_v1.database import Database +from google.cloud.spanner_v1.instance import Instance +import grpc + + +class TestBasics(unittest.TestCase): + server: grpc.Server = None + spanner_service: SpannerServicer = None + database_admin_service: DatabaseAdminServicer = None + port: int = None + + def __init__(self, *args, **kwargs): + super(TestBasics, self).__init__(*args, **kwargs) + self._client = None + self._instance = None + self._database = None + + @classmethod + def setUpClass(cls): + ( + TestBasics.server, + TestBasics.spanner_service, + TestBasics.database_admin_service, + TestBasics.port, + ) = start_mock_server() + + @classmethod + def tearDownClass(cls): + if TestBasics.server is not None: + TestBasics.server.stop(grace=None) + TestBasics.server = None + + def _add_select1_result(self): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name="c", + type=spanner_type.Type( + dict(code=spanner_type.TypeCode.INT64) + ), + ) + ) + ] + ) + ) + ) + ), + ) + ) + result.rows.extend(["1"]) + TestBasics.spanner_service.mock_spanner.add_result("select 1", result) + + @property + def client(self) -> Client: + if self._client is None: + self._client = Client( + project="test-project", + credentials=AnonymousCredentials(), + client_options=ClientOptions( + api_endpoint="localhost:" + str(TestBasics.port), + ), + ) + return self._client + + @property + def instance(self) -> Instance: + if self._instance is None: + self._instance = self.client.instance("test-instance") + return self._instance + + @property + def database(self) -> Database: + if self._database is None: + self._database = self.instance.database( + "test-database", pool=FixedSizePool(size=10) + ) + return self._database + + def test_select1(self): + self._add_select1_result() + with self.database.snapshot() as snapshot: + results = snapshot.execute_sql("select 1") + result_list = [] + for row in results: + result_list.append(row) + self.assertEqual(1, row[0]) + self.assertEqual(1, len(result_list)) + requests = self.spanner_service.requests + self.assertEqual(3, len(requests)) + self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) + # TODO: Optimize FixedSizePool so this GetSessionRequest is not executed + # every time a session is fetched. 
+ self.assertTrue(isinstance(requests[1], GetSessionRequest)) + self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) + + def test_create_table(self): + database_admin_api = self.client.database_admin_api + request = spanner_database_admin.UpdateDatabaseDdlRequest( + dict( + database=database_admin_api.database_path( + "test-project", "test-instance", "test-database" + ), + statements=[ + "CREATE TABLE Test (" + "Id INT64, " + "Value STRING(MAX)) " + "PRIMARY KEY (Id)", + ], + ) + ) + operation = database_admin_api.update_database_ddl(request) + operation.result(1) From a214885ed474f3d69875ef580d5f8cbbabe9199a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Thu, 5 Dec 2024 12:50:12 +0100 Subject: [PATCH 08/13] fix: allow setting staleness to same value in tx (#1253) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: allow setting staleness to same value in tx Repeatedly setting the staleness property of a connection in a transaction to the same value caused an error. This made it harder to use this property in SQLAlchemy. Updates https://github.com/googleapis/python-spanner-sqlalchemy/issues/495 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * Revert "🦉 Updates from OwlBot post-processor" This reverts commit 282a9828507ca3511b37c81a1c10f6c0622e79ad. --------- Co-authored-by: Owl Bot --- google/cloud/spanner_dbapi/connection.py | 2 +- tests/unit/spanner_dbapi/test_connection.py | 14 ++++++++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/google/cloud/spanner_dbapi/connection.py b/google/cloud/spanner_dbapi/connection.py index 416bb2a959..cec6c64dac 100644 --- a/google/cloud/spanner_dbapi/connection.py +++ b/google/cloud/spanner_dbapi/connection.py @@ -277,7 +277,7 @@ def staleness(self, value): Args: value (dict): Staleness type and value. """ - if self._spanner_transaction_started: + if self._spanner_transaction_started and value != self._staleness: raise ValueError( "`staleness` option can't be changed while a transaction is in progress. " "Commit or rollback the current transaction and try again." diff --git a/tests/unit/spanner_dbapi/test_connection.py b/tests/unit/spanner_dbapi/test_connection.py index a07e94735f..4bee9e93c7 100644 --- a/tests/unit/spanner_dbapi/test_connection.py +++ b/tests/unit/spanner_dbapi/test_connection.py @@ -669,6 +669,20 @@ def test_staleness_inside_transaction(self): with self.assertRaises(ValueError): connection.staleness = {"read_timestamp": datetime.datetime(2021, 9, 21)} + def test_staleness_inside_transaction_same_value(self): + """ + Verify that setting `staleness` to the same value in a transaction is allowed. 
+ """ + connection = self._make_connection() + connection.staleness = {"read_timestamp": datetime.datetime(2021, 9, 21)} + connection._spanner_transaction_started = True + connection._transaction = mock.Mock() + + connection.staleness = {"read_timestamp": datetime.datetime(2021, 9, 21)} + self.assertEqual( + connection.staleness, {"read_timestamp": datetime.datetime(2021, 9, 21)} + ) + def test_staleness_multi_use(self): """ Check that `staleness` option is correctly From c064815abaaa4b564edd6f0e365a37e7e839080c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Thu, 5 Dec 2024 16:09:04 +0100 Subject: [PATCH 09/13] perf: remove repeated GetSession calls for FixedSizePool (#1252) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: add mock server tests * chore: move to testing folder + fix formatting * refactor: move mock server tests to separate directory * feat: add database admin service Adds a DatabaseAdminService to the mock server and sets up a basic test case for this. Also removes the generated stubs in the grpc files, as these are not needed. * test: add DDL test * test: add async client tests * chore: remove async + add transaction handling * chore: cleanup * perf: remove repeated GetSession calls for FixedSizePool Add a _last_use_time to Session and use this to determine whether the FixedSizePool should check whether the session still exists, and whether it should be replaced. This significantly reduces the number of times that GetSession is called when using FixedSizePool. * chore: run code formatter * chore: revert to utcnow() * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: update _last_use_time in trace_call * chore: fix formatting * fix: remove unnecessary update of _last_use_time --------- Co-authored-by: Owl Bot --- .../spanner_v1/_opentelemetry_tracing.py | 4 +++ google/cloud/spanner_v1/pool.py | 9 ++++-- google/cloud/spanner_v1/session.py | 11 +++++++ google/cloud/spanner_v1/snapshot.py | 2 ++ tests/mockserver_tests/test_basics.py | 8 ++--- tests/unit/test_pool.py | 32 +++++++++++++++++-- 6 files changed, 55 insertions(+), 11 deletions(-) diff --git a/google/cloud/spanner_v1/_opentelemetry_tracing.py b/google/cloud/spanner_v1/_opentelemetry_tracing.py index feb3b92756..efbeea05e7 100644 --- a/google/cloud/spanner_v1/_opentelemetry_tracing.py +++ b/google/cloud/spanner_v1/_opentelemetry_tracing.py @@ -15,6 +15,7 @@ """Manages OpenTelemetry trace creation and handling""" from contextlib import contextmanager +from datetime import datetime import os from google.cloud.spanner_v1 import SpannerClient @@ -56,6 +57,9 @@ def get_tracer(tracer_provider=None): @contextmanager def trace_call(name, session, extra_attributes=None, observability_options=None): + if session: + session._last_use_time = datetime.now() + if not HAS_OPENTELEMETRY_INSTALLED or not session: # Empty context manager. Users will have to check if the generated value is None or a span yield None diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py index 56837bfc0b..c95ef7a7b9 100644 --- a/google/cloud/spanner_v1/pool.py +++ b/google/cloud/spanner_v1/pool.py @@ -145,7 +145,8 @@ class FixedSizePool(AbstractSessionPool): - Pre-allocates / creates a fixed number of sessions. - "Pings" existing sessions via :meth:`session.exists` before returning - them, and replaces expired sessions. 
+ sessions that have not been used for more than 55 minutes and replaces + expired sessions. - Blocks, with a timeout, when :meth:`get` is called on an empty pool. Raises after timing out. @@ -171,6 +172,7 @@ class FixedSizePool(AbstractSessionPool): DEFAULT_SIZE = 10 DEFAULT_TIMEOUT = 10 + DEFAULT_MAX_AGE_MINUTES = 55 def __init__( self, @@ -178,11 +180,13 @@ def __init__( default_timeout=DEFAULT_TIMEOUT, labels=None, database_role=None, + max_age_minutes=DEFAULT_MAX_AGE_MINUTES, ): super(FixedSizePool, self).__init__(labels=labels, database_role=database_role) self.size = size self.default_timeout = default_timeout self._sessions = queue.LifoQueue(size) + self._max_age = datetime.timedelta(minutes=max_age_minutes) def bind(self, database): """Associate the pool with a database. @@ -230,8 +234,9 @@ def get(self, timeout=None): timeout = self.default_timeout session = self._sessions.get(block=True, timeout=timeout) + age = _NOW() - session.last_use_time - if not session.exists(): + if age >= self._max_age and not session.exists(): session = self._database.session() session.create() diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index 6281148590..539f36af2b 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -17,6 +17,7 @@ from functools import total_ordering import random import time +from datetime import datetime from google.api_core.exceptions import Aborted from google.api_core.exceptions import GoogleAPICallError @@ -69,6 +70,7 @@ def __init__(self, database, labels=None, database_role=None): labels = {} self._labels = labels self._database_role = database_role + self._last_use_time = datetime.utcnow() def __lt__(self, other): return self._session_id < other._session_id @@ -78,6 +80,14 @@ def session_id(self): """Read-only ID, set by the back-end during :meth:`create`.""" return self._session_id + @property + def last_use_time(self): + """ "Approximate last use time of this session + + :rtype: datetime + :returns: the approximate last use time of this session""" + return self._last_use_time + @property def database_role(self): """User-assigned database-role for the session. @@ -222,6 +232,7 @@ def ping(self): metadata = _metadata_with_prefix(self._database.name) request = ExecuteSqlRequest(session=self.name, sql="SELECT 1") api.execute_sql(request=request, metadata=metadata) + self._last_use_time = datetime.now() def snapshot(self, **kw): """Create a snapshot to perform a set of reads with shared staleness. 
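Taken together, the pool and session changes above replace the unconditional GetSession ping with a freshness check: a pooled session is only verified with session.exists() once it has been idle long enough that the backend may have deleted it. A simplified sketch of the resulting checkout logic (the real get() also accepts an explicit timeout argument; utcnow() is used here for illustration, matching the timestamp set in Session.__init__):

    from datetime import datetime, timedelta

    MAX_AGE = timedelta(minutes=55)  # FixedSizePool.DEFAULT_MAX_AGE_MINUTES

    def checkout(pool):
        # Pop the most recently used session (the pool uses a LIFO queue).
        session = pool._sessions.get(block=True, timeout=pool.default_timeout)
        age = datetime.utcnow() - session.last_use_time
        # Only sessions idle for ~55+ minutes risk having been deleted
        # server-side (Cloud Spanner may delete sessions that are idle for
        # more than an hour), so only those pay for an exists() round trip.
        if age >= MAX_AGE and not session.exists():
            session = pool._database.session()
            session.create()
        return session

This is also why the mock-server test above drops from three requests to two: for recently used sessions the GetSessionRequest disappears entirely.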
diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index 143e17c503..89b5094706 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -14,6 +14,7 @@ """Model a set of read-only queries to a database as a snapshot.""" +from datetime import datetime import functools import threading from google.protobuf.struct_pb2 import Struct @@ -364,6 +365,7 @@ def read( ) self._read_request_count += 1 + self._session._last_use_time = datetime.now() if self._multi_use: return StreamedResultSet( diff --git a/tests/mockserver_tests/test_basics.py b/tests/mockserver_tests/test_basics.py index f2dab9af06..12a224314f 100644 --- a/tests/mockserver_tests/test_basics.py +++ b/tests/mockserver_tests/test_basics.py @@ -29,7 +29,6 @@ FixedSizePool, BatchCreateSessionsRequest, ExecuteSqlRequest, - GetSessionRequest, ) from google.cloud.spanner_v1.database import Database from google.cloud.spanner_v1.instance import Instance @@ -125,12 +124,9 @@ def test_select1(self): self.assertEqual(1, row[0]) self.assertEqual(1, len(result_list)) requests = self.spanner_service.requests - self.assertEqual(3, len(requests)) + self.assertEqual(2, len(requests), msg=requests) self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) - # TODO: Optimize FixedSizePool so this GetSessionRequest is not executed - # every time a session is fetched. - self.assertTrue(isinstance(requests[1], GetSessionRequest)) - self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) + self.assertTrue(isinstance(requests[1], ExecuteSqlRequest)) def test_create_table(self): database_admin_api = self.client.database_admin_api diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py index 23ed3e7251..2e3b46fa73 100644 --- a/tests/unit/test_pool.py +++ b/tests/unit/test_pool.py @@ -15,6 +15,7 @@ from functools import total_ordering import unittest +from datetime import datetime, timedelta import mock @@ -184,13 +185,30 @@ def test_bind(self): for session in SESSIONS: session.create.assert_not_called() - def test_get_non_expired(self): + def test_get_active(self): pool = self._make_one(size=4) database = _Database("name") SESSIONS = sorted([_Session(database) for i in range(0, 4)]) database._sessions.extend(SESSIONS) pool.bind(database) + # check if sessions returned in LIFO order + for i in (3, 2, 1, 0): + session = pool.get() + self.assertIs(session, SESSIONS[i]) + self.assertFalse(session._exists_checked) + self.assertFalse(pool._sessions.full()) + + def test_get_non_expired(self): + pool = self._make_one(size=4) + database = _Database("name") + last_use_time = datetime.utcnow() - timedelta(minutes=56) + SESSIONS = sorted( + [_Session(database, last_use_time=last_use_time) for i in range(0, 4)] + ) + database._sessions.extend(SESSIONS) + pool.bind(database) + # check if sessions returned in LIFO order for i in (3, 2, 1, 0): session = pool.get() @@ -201,7 +219,8 @@ def test_get_non_expired(self): def test_get_expired(self): pool = self._make_one(size=4) database = _Database("name") - SESSIONS = [_Session(database)] * 5 + last_use_time = datetime.utcnow() - timedelta(minutes=65) + SESSIONS = [_Session(database, last_use_time=last_use_time)] * 5 SESSIONS[0]._exists = False database._sessions.extend(SESSIONS) pool.bind(database) @@ -915,7 +934,9 @@ def _make_transaction(*args, **kw): class _Session(object): _transaction = None - def __init__(self, database, exists=True, transaction=None): + def __init__( + self, database, exists=True, transaction=None, 
last_use_time=datetime.utcnow() + ): self._database = database self._exists = exists self._exists_checked = False @@ -923,10 +944,15 @@ def __init__(self, database, exists=True, transaction=None): self.create = mock.Mock() self._deleted = False self._transaction = transaction + self._last_use_time = last_use_time def __lt__(self, other): return id(self) < id(other) + @property + def last_use_time(self): + return self._last_use_time + def exists(self): self._exists_checked = True return self._exists From a81af3bb1bee1adc92cafcd80dba6599c5370217 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Thu, 5 Dec 2024 17:32:08 +0100 Subject: [PATCH 10/13] build: add mock server tests to Owlbot config (#1254) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * build: add mock server tests to Owlbot config * chore: add escapes * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- noxfile.py | 2 +- owlbot.py | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/noxfile.py b/noxfile.py index 905df735bc..f32c24f1e3 100644 --- a/noxfile.py +++ b/noxfile.py @@ -33,8 +33,8 @@ LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12" +DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12" UNIT_TEST_PYTHON_VERSIONS: List[str] = [ "3.7", "3.8", diff --git a/owlbot.py b/owlbot.py index c215f26946..e7fb391c2a 100644 --- a/owlbot.py +++ b/owlbot.py @@ -307,4 +307,49 @@ def prerelease_deps\(session, protobuf_implementation\):""", def prerelease_deps(session, protobuf_implementation, database_dialect):""", ) + +mockserver_test = """ +@nox.session(python=DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION) +def mockserver(session): + # Install all test dependencies, then install this package in-place. + + constraints_path = str( + CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt" + ) + # install_unittest_dependencies(session, "-c", constraints_path) + standard_deps = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_DEPENDENCIES + session.install(*standard_deps, "-c", constraints_path) + session.install("-e", ".", "-c", constraints_path) + + # Run py.test against the mockserver tests. 
+ session.run( + "py.test", + "--quiet", + f"--junitxml=unit_{session.python}_sponge_log.xml", + "--cov=google", + "--cov=tests/unit", + "--cov-append", + "--cov-config=.coveragerc", + "--cov-report=", + "--cov-fail-under=0", + os.path.join("tests", "mockserver_tests"), + *session.posargs, + ) + +""" + +place_before( + "noxfile.py", + "def install_systemtest_dependencies(session, *constraints):", + mockserver_test, + escape="()_*:", +) + +place_before( + "noxfile.py", + "UNIT_TEST_PYTHON_VERSIONS: List[str] = [", + 'DEFAULT_MOCK_SERVER_TESTS_PYTHON_VERSION = "3.12"', + escape="[]", +) + s.shell.run(["nox", "-s", "blacken"], hide_output=False) From 758bf4889a7f3346bc8282a3eed47aee43be650c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Thu, 5 Dec 2024 19:22:26 +0100 Subject: [PATCH 11/13] fix: dbapi raised AttributeError with [] as arguments (#1257) If the cursor.execute(sql, args) function was called with an empty array instead of None, it would raise an AttributeError like this: AttributeError: 'list' object has no attribute 'items' This is for example automatically done by SQLAlchemy when executing a raw statement on a dbapi connection. --- google/cloud/spanner_v1/transaction.py | 2 +- tests/mockserver_tests/test_basics.py | 45 ++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index beb3e46edb..d99c4fde2f 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -308,7 +308,7 @@ def _make_params_pb(params, param_types): :raises ValueError: If ``params`` is None but ``param_types`` is not None. """ - if params is not None: + if params: return Struct( fields={key: _make_value_pb(value) for key, value in params.items()} ) diff --git a/tests/mockserver_tests/test_basics.py b/tests/mockserver_tests/test_basics.py index 12a224314f..9d6dad095e 100644 --- a/tests/mockserver_tests/test_basics.py +++ b/tests/mockserver_tests/test_basics.py @@ -15,6 +15,8 @@ import unittest from google.cloud.spanner_admin_database_v1.types import spanner_database_admin +from google.cloud.spanner_dbapi import Connection +from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer from google.cloud.spanner_v1.testing.mock_spanner import ( start_mock_server, @@ -29,6 +31,8 @@ FixedSizePool, BatchCreateSessionsRequest, ExecuteSqlRequest, + BeginTransactionRequest, + TransactionOptions, ) from google.cloud.spanner_v1.database import Database from google.cloud.spanner_v1.instance import Instance @@ -62,6 +66,10 @@ def tearDownClass(cls): TestBasics.server.stop(grace=None) TestBasics.server = None + def teardown_method(self, *args, **kwargs): + TestBasics.spanner_service.clear_requests() + TestBasics.database_admin_service.clear_requests() + def _add_select1_result(self): result = result_set.ResultSet( dict( @@ -88,6 +96,19 @@ def _add_select1_result(self): result.rows.extend(["1"]) TestBasics.spanner_service.mock_spanner.add_result("select 1", result) + def add_update_count( + self, + sql: str, + count: int, + dml_mode: AutocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL, + ): + if dml_mode == AutocommitDmlMode.PARTITIONED_NON_ATOMIC: + stats = dict(row_count_lower_bound=count) + else: + stats = dict(row_count_exact=count) + result = result_set.ResultSet(dict(stats=result_set.ResultSetStats(stats))) + 
TestBasics.spanner_service.mock_spanner.add_result(sql, result) + @property def client(self) -> Client: if self._client is None: @@ -145,3 +166,27 @@ def test_create_table(self): ) operation = database_admin_api.update_database_ddl(request) operation.result(1) + + # TODO: Move this to a separate class once the mock server test setup has + # been re-factored to use a base class for the boiler plate code. + def test_dbapi_partitioned_dml(self): + sql = "UPDATE singers SET foo='bar' WHERE active = true" + self.add_update_count(sql, 100, AutocommitDmlMode.PARTITIONED_NON_ATOMIC) + connection = Connection(self.instance, self.database) + connection.autocommit = True + connection.set_autocommit_dml_mode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC) + with connection.cursor() as cursor: + # Note: SQLAlchemy uses [] as the list of parameters for statements + # with no parameters. + cursor.execute(sql, []) + self.assertEqual(100, cursor.rowcount) + + requests = self.spanner_service.requests + self.assertEqual(3, len(requests), msg=requests) + self.assertTrue(isinstance(requests[0], BatchCreateSessionsRequest)) + self.assertTrue(isinstance(requests[1], BeginTransactionRequest)) + self.assertTrue(isinstance(requests[2], ExecuteSqlRequest)) + begin_request: BeginTransactionRequest = requests[1] + self.assertEqual( + TransactionOptions(dict(partitioned_dml={})), begin_request.options + ) From 96da8e1c306c06ec08108768edcc2d5d5ea103bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Knut=20Olav=20L=C3=B8ite?= Date: Thu, 5 Dec 2024 21:42:19 +0100 Subject: [PATCH 12/13] test: create base class for mockserver tests (#1255) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test: create base class for mockserver tests Move the boiler-plate code for mockserver tests to a separate class, so this can easily be re-used for other tests. * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../mockserver_tests/mock_server_test_base.py | 139 ++++++++++++++++++ tests/mockserver_tests/test_basics.py | 121 +-------------- 2 files changed, 147 insertions(+), 113 deletions(-) create mode 100644 tests/mockserver_tests/mock_server_test_base.py diff --git a/tests/mockserver_tests/mock_server_test_base.py b/tests/mockserver_tests/mock_server_test_base.py new file mode 100644 index 0000000000..1cd7656297 --- /dev/null +++ b/tests/mockserver_tests/mock_server_test_base.py @@ -0,0 +1,139 @@ +# Copyright 2024 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
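This base class centralizes the mock-server boiler-plate; a test module built on it reduces to roughly the following sketch (the class and test names are illustrative; MockServerTestBase and its helper functions are defined below in this file):

    from tests.mockserver_tests.mock_server_test_base import (
        MockServerTestBase,
        add_select1_result,
    )

    class TestWithMockServer(MockServerTestBase):
        # The base class starts one mock server per test class, clears the
        # recorded requests after every test, and lazily creates the
        # client/instance/database properties on first use.
        def test_select1(self):
            add_select1_result()
            with self.database.snapshot() as snapshot:
                rows = list(snapshot.execute_sql("select 1"))
            self.assertEqual(1, len(rows))
            self.assertEqual(1, rows[0][0])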
+ +import unittest + +from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode +from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer +from google.cloud.spanner_v1.testing.mock_spanner import ( + start_mock_server, + SpannerServicer, +) +import google.cloud.spanner_v1.types.type as spanner_type +import google.cloud.spanner_v1.types.result_set as result_set +from google.api_core.client_options import ClientOptions +from google.auth.credentials import AnonymousCredentials +from google.cloud.spanner_v1 import Client, TypeCode, FixedSizePool +from google.cloud.spanner_v1.database import Database +from google.cloud.spanner_v1.instance import Instance +import grpc + + +def add_result(sql: str, result: result_set.ResultSet): + MockServerTestBase.spanner_service.mock_spanner.add_result(sql, result) + + +def add_update_count( + sql: str, count: int, dml_mode: AutocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL +): + if dml_mode == AutocommitDmlMode.PARTITIONED_NON_ATOMIC: + stats = dict(row_count_lower_bound=count) + else: + stats = dict(row_count_exact=count) + result = result_set.ResultSet(dict(stats=result_set.ResultSetStats(stats))) + add_result(sql, result) + + +def add_select1_result(): + add_single_result("select 1", "c", TypeCode.INT64, [("1",)]) + + +def add_single_result( + sql: str, column_name: str, type_code: spanner_type.TypeCode, row +): + result = result_set.ResultSet( + dict( + metadata=result_set.ResultSetMetadata( + dict( + row_type=spanner_type.StructType( + dict( + fields=[ + spanner_type.StructType.Field( + dict( + name=column_name, + type=spanner_type.Type(dict(code=type_code)), + ) + ) + ] + ) + ) + ) + ), + ) + ) + result.rows.extend(row) + MockServerTestBase.spanner_service.mock_spanner.add_result(sql, result) + + +class MockServerTestBase(unittest.TestCase): + server: grpc.Server = None + spanner_service: SpannerServicer = None + database_admin_service: DatabaseAdminServicer = None + port: int = None + + def __init__(self, *args, **kwargs): + super(MockServerTestBase, self).__init__(*args, **kwargs) + self._client = None + self._instance = None + self._database = None + + @classmethod + def setup_class(cls): + ( + MockServerTestBase.server, + MockServerTestBase.spanner_service, + MockServerTestBase.database_admin_service, + MockServerTestBase.port, + ) = start_mock_server() + + @classmethod + def teardown_class(cls): + if MockServerTestBase.server is not None: + MockServerTestBase.server.stop(grace=None) + MockServerTestBase.server = None + + def setup_method(self, *args, **kwargs): + self._client = None + self._instance = None + self._database = None + + def teardown_method(self, *args, **kwargs): + MockServerTestBase.spanner_service.clear_requests() + MockServerTestBase.database_admin_service.clear_requests() + + @property + def client(self) -> Client: + if self._client is None: + self._client = Client( + project="p", + credentials=AnonymousCredentials(), + client_options=ClientOptions( + api_endpoint="localhost:" + str(MockServerTestBase.port), + ), + ) + return self._client + + @property + def instance(self) -> Instance: + if self._instance is None: + self._instance = self.client.instance("test-instance") + return self._instance + + @property + def database(self) -> Database: + if self._database is None: + self._database = self.instance.database( + "test-database", pool=FixedSizePool(size=10) + ) + return self._database diff --git a/tests/mockserver_tests/test_basics.py b/tests/mockserver_tests/test_basics.py index 
9d6dad095e..ed0906cb9b 100644 --- a/tests/mockserver_tests/test_basics.py +++ b/tests/mockserver_tests/test_basics.py @@ -12,131 +12,26 @@ # See the License for the specific language governing permissions and # limitations under the License. -import unittest - from google.cloud.spanner_admin_database_v1.types import spanner_database_admin from google.cloud.spanner_dbapi import Connection from google.cloud.spanner_dbapi.parsed_statement import AutocommitDmlMode -from google.cloud.spanner_v1.testing.mock_database_admin import DatabaseAdminServicer -from google.cloud.spanner_v1.testing.mock_spanner import ( - start_mock_server, - SpannerServicer, -) -import google.cloud.spanner_v1.types.type as spanner_type -import google.cloud.spanner_v1.types.result_set as result_set -from google.api_core.client_options import ClientOptions -from google.auth.credentials import AnonymousCredentials from google.cloud.spanner_v1 import ( - Client, - FixedSizePool, BatchCreateSessionsRequest, ExecuteSqlRequest, BeginTransactionRequest, TransactionOptions, ) -from google.cloud.spanner_v1.database import Database -from google.cloud.spanner_v1.instance import Instance -import grpc - - -class TestBasics(unittest.TestCase): - server: grpc.Server = None - spanner_service: SpannerServicer = None - database_admin_service: DatabaseAdminServicer = None - port: int = None - - def __init__(self, *args, **kwargs): - super(TestBasics, self).__init__(*args, **kwargs) - self._client = None - self._instance = None - self._database = None - @classmethod - def setUpClass(cls): - ( - TestBasics.server, - TestBasics.spanner_service, - TestBasics.database_admin_service, - TestBasics.port, - ) = start_mock_server() - - @classmethod - def tearDownClass(cls): - if TestBasics.server is not None: - TestBasics.server.stop(grace=None) - TestBasics.server = None - - def teardown_method(self, *args, **kwargs): - TestBasics.spanner_service.clear_requests() - TestBasics.database_admin_service.clear_requests() - - def _add_select1_result(self): - result = result_set.ResultSet( - dict( - metadata=result_set.ResultSetMetadata( - dict( - row_type=spanner_type.StructType( - dict( - fields=[ - spanner_type.StructType.Field( - dict( - name="c", - type=spanner_type.Type( - dict(code=spanner_type.TypeCode.INT64) - ), - ) - ) - ] - ) - ) - ) - ), - ) - ) - result.rows.extend(["1"]) - TestBasics.spanner_service.mock_spanner.add_result("select 1", result) - - def add_update_count( - self, - sql: str, - count: int, - dml_mode: AutocommitDmlMode = AutocommitDmlMode.TRANSACTIONAL, - ): - if dml_mode == AutocommitDmlMode.PARTITIONED_NON_ATOMIC: - stats = dict(row_count_lower_bound=count) - else: - stats = dict(row_count_exact=count) - result = result_set.ResultSet(dict(stats=result_set.ResultSetStats(stats))) - TestBasics.spanner_service.mock_spanner.add_result(sql, result) - - @property - def client(self) -> Client: - if self._client is None: - self._client = Client( - project="test-project", - credentials=AnonymousCredentials(), - client_options=ClientOptions( - api_endpoint="localhost:" + str(TestBasics.port), - ), - ) - return self._client - - @property - def instance(self) -> Instance: - if self._instance is None: - self._instance = self.client.instance("test-instance") - return self._instance +from tests.mockserver_tests.mock_server_test_base import ( + MockServerTestBase, + add_select1_result, + add_update_count, +) - @property - def database(self) -> Database: - if self._database is None: - self._database = self.instance.database( - "test-database", 
pool=FixedSizePool(size=10) - ) - return self._database +class TestBasics(MockServerTestBase): def test_select1(self): - self._add_select1_result() + add_select1_result() with self.database.snapshot() as snapshot: results = snapshot.execute_sql("select 1") result_list = [] @@ -171,7 +66,7 @@ def test_create_table(self): # been re-factored to use a base class for the boiler plate code. def test_dbapi_partitioned_dml(self): sql = "UPDATE singers SET foo='bar' WHERE active = true" - self.add_update_count(sql, 100, AutocommitDmlMode.PARTITIONED_NON_ATOMIC) + add_update_count(sql, 100, AutocommitDmlMode.PARTITIONED_NON_ATOMIC) connection = Connection(self.instance, self.database) connection.autocommit = True connection.set_autocommit_dml_mode(AutocommitDmlMode.PARTITIONED_NON_ATOMIC) From 4829013ec53455720446c19f0a31cc664a420b50 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Fri, 6 Dec 2024 11:17:12 +0530 Subject: [PATCH 13/13] chore(main): release 3.51.0 (#1240) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> Co-authored-by: Sri Harsha CH <57220027+harshachinta@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 27 +++++++++++++++++++ .../gapic_version.py | 2 +- .../gapic_version.py | 2 +- google/cloud/spanner_v1/gapic_version.py | 2 +- ...data_google.spanner.admin.database.v1.json | 2 +- ...data_google.spanner.admin.instance.v1.json | 2 +- .../snippet_metadata_google.spanner.v1.json | 2 +- 8 files changed, 34 insertions(+), 7 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 7c20592b72..b4ec2efce5 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "3.50.1" + ".": "3.51.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index 43229596ba..4d2eb31d6a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,33 @@ [1]: https://pypi.org/project/google-cloud-spanner/#history +## [3.51.0](https://github.com/googleapis/python-spanner/compare/v3.50.1...v3.51.0) (2024-12-05) + + +### Features + +* Add connection variable for ignoring transaction warnings ([#1249](https://github.com/googleapis/python-spanner/issues/1249)) ([eeb7836](https://github.com/googleapis/python-spanner/commit/eeb7836b6350aa9626dfb733208e6827d38bb9c9)) +* **spanner:** Implement custom tracer_provider injection for opentelemetry traces ([#1229](https://github.com/googleapis/python-spanner/issues/1229)) ([6869ed6](https://github.com/googleapis/python-spanner/commit/6869ed651e41d7a8af046884bc6c792a4177f766)) +* Support float32 parameters in dbapi ([#1245](https://github.com/googleapis/python-spanner/issues/1245)) ([829b799](https://github.com/googleapis/python-spanner/commit/829b799e0c9c6da274bf95c272cda564cfdba928)) + + +### Bug Fixes + +* Allow setting connection.read_only to same value ([#1247](https://github.com/googleapis/python-spanner/issues/1247)) ([5e8ca94](https://github.com/googleapis/python-spanner/commit/5e8ca949b583fbcf0b92b42696545973aad8c78f)) +* Allow setting staleness to same value in tx ([#1253](https://github.com/googleapis/python-spanner/issues/1253)) ([a214885](https://github.com/googleapis/python-spanner/commit/a214885ed474f3d69875ef580d5f8cbbabe9199a)) +* Dbapi raised AttributeError with [] as arguments ([#1257](https://github.com/googleapis/python-spanner/issues/1257)) ([758bf48](https://github.com/googleapis/python-spanner/commit/758bf4889a7f3346bc8282a3eed47aee43be650c)) + + 
+### Performance Improvements + +* Optimize ResultSet decoding ([#1244](https://github.com/googleapis/python-spanner/issues/1244)) ([ccae6e0](https://github.com/googleapis/python-spanner/commit/ccae6e0287ba6cf3c14f15a907b2106b11ef1fdc)) +* Remove repeated GetSession calls for FixedSizePool ([#1252](https://github.com/googleapis/python-spanner/issues/1252)) ([c064815](https://github.com/googleapis/python-spanner/commit/c064815abaaa4b564edd6f0e365a37e7e839080c)) + + +### Documentation + +* **samples:** Add samples for Cloud Spanner Default Backup Schedules ([#1238](https://github.com/googleapis/python-spanner/issues/1238)) ([054a186](https://github.com/googleapis/python-spanner/commit/054a18658eedc5d4dbecb7508baa3f3d67f5b815)) + ## [3.50.1](https://github.com/googleapis/python-spanner/compare/v3.50.0...v3.50.1) (2024-11-14) diff --git a/google/cloud/spanner_admin_database_v1/gapic_version.py b/google/cloud/spanner_admin_database_v1/gapic_version.py index 873057e050..99e11c0cb5 100644 --- a/google/cloud/spanner_admin_database_v1/gapic_version.py +++ b/google/cloud/spanner_admin_database_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.50.1" # {x-release-please-version} +__version__ = "3.51.0" # {x-release-please-version} diff --git a/google/cloud/spanner_admin_instance_v1/gapic_version.py b/google/cloud/spanner_admin_instance_v1/gapic_version.py index 873057e050..99e11c0cb5 100644 --- a/google/cloud/spanner_admin_instance_v1/gapic_version.py +++ b/google/cloud/spanner_admin_instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "3.50.1" # {x-release-please-version} +__version__ = "3.51.0" # {x-release-please-version} diff --git a/google/cloud/spanner_v1/gapic_version.py b/google/cloud/spanner_v1/gapic_version.py index 873057e050..99e11c0cb5 100644 --- a/google/cloud/spanner_v1/gapic_version.py +++ b/google/cloud/spanner_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 #
-__version__ = "3.50.1"  # {x-release-please-version}
+__version__ = "3.51.0"  # {x-release-please-version}
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
index 9324f2056b..7c35814b17 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.database.v1.json
@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-spanner-admin-database",
-    "version": "3.50.1"
+    "version": "3.51.0"
   },
   "snippets": [
     {
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
index 7f64769236..261a7d44f3 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.admin.instance.v1.json
@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-spanner-admin-instance",
-    "version": "3.50.1"
+    "version": "3.51.0"
   },
   "snippets": [
     {
diff --git a/samples/generated_samples/snippet_metadata_google.spanner.v1.json b/samples/generated_samples/snippet_metadata_google.spanner.v1.json
index 431109d19e..ddb4419273 100644
--- a/samples/generated_samples/snippet_metadata_google.spanner.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.spanner.v1.json
@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
    "name": "google-cloud-spanner",
-    "version": "3.50.1"
+    "version": "3.51.0"
   },
   "snippets": [
     {